1af1a8899SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * raid1.c : Multiple Devices driver for Linux
41da177e4SLinus Torvalds *
51da177e4SLinus Torvalds * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
61da177e4SLinus Torvalds *
71da177e4SLinus Torvalds * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
81da177e4SLinus Torvalds *
91da177e4SLinus Torvalds * RAID-1 management functions.
101da177e4SLinus Torvalds *
111da177e4SLinus Torvalds * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
121da177e4SLinus Torvalds *
1396de0e25SJan Engelhardt * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
141da177e4SLinus Torvalds * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
151da177e4SLinus Torvalds *
16191ea9b2SNeilBrown * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
17191ea9b2SNeilBrown * bitmapped intelligence in resync:
18191ea9b2SNeilBrown *
19191ea9b2SNeilBrown * - bitmap marked during normal i/o
20191ea9b2SNeilBrown * - bitmap used to skip nondirty blocks during sync
21191ea9b2SNeilBrown *
22191ea9b2SNeilBrown * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
23191ea9b2SNeilBrown * - persistent bitmap code
241da177e4SLinus Torvalds */
251da177e4SLinus Torvalds
265a0e3ad6STejun Heo #include <linux/slab.h>
2725570727SStephen Rothwell #include <linux/delay.h>
28bff61975SNeilBrown #include <linux/blkdev.h>
29056075c7SPaul Gortmaker #include <linux/module.h>
30bff61975SNeilBrown #include <linux/seq_file.h>
318bda470eSChristian Dietrich #include <linux/ratelimit.h>
3269b00b5bSGuoqing Jiang #include <linux/interval_tree_generic.h>
333f07c014SIngo Molnar
34109e3765SNeilBrown #include <trace/events/block.h>
353f07c014SIngo Molnar
3643b2e5d8SNeilBrown #include "md.h"
37ef740c37SChristoph Hellwig #include "raid1.h"
38935fe098SMike Snitzer #include "md-bitmap.h"
39191ea9b2SNeilBrown
40394ed8e4SShaohua Li #define UNSUPPORTED_MDDEV_FLAGS \
41394ed8e4SShaohua Li ((1L << MD_HAS_JOURNAL) | \
42ea0213e0SArtur Paszkiewicz (1L << MD_JOURNAL_CLEAN) | \
43ddc08823SPawel Baldysiak (1L << MD_HAS_PPL) | \
44ddc08823SPawel Baldysiak (1L << MD_HAS_MULTIPLE_PPLS))
45394ed8e4SShaohua Li
46fd76863eScolyli@suse.de static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47fd76863eScolyli@suse.de static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
481da177e4SLinus Torvalds
49578b54adSNeilBrown #define raid1_log(md, fmt, args...)
\ 50578b54adSNeilBrown do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) 51578b54adSNeilBrown 52fb0eb5dfSMing Lei #include "raid1-10.c" 53fb0eb5dfSMing Lei 5469b00b5bSGuoqing Jiang #define START(node) ((node)->start) 5569b00b5bSGuoqing Jiang #define LAST(node) ((node)->last) 5669b00b5bSGuoqing Jiang INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last, 5769b00b5bSGuoqing Jiang START, LAST, static inline, raid1_rb); 5869b00b5bSGuoqing Jiang 59d0d2d8baSGuoqing Jiang static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio, 60d0d2d8baSGuoqing Jiang struct serial_info *si, int idx) 613e148a32SGuoqing Jiang { 623e148a32SGuoqing Jiang unsigned long flags; 633e148a32SGuoqing Jiang int ret = 0; 64d0d2d8baSGuoqing Jiang sector_t lo = r1_bio->sector; 65d0d2d8baSGuoqing Jiang sector_t hi = lo + r1_bio->sectors; 66025471f9SGuoqing Jiang struct serial_in_rdev *serial = &rdev->serial[idx]; 673e148a32SGuoqing Jiang 6869b00b5bSGuoqing Jiang spin_lock_irqsave(&serial->serial_lock, flags); 693e148a32SGuoqing Jiang /* collision happened */ 7069b00b5bSGuoqing Jiang if (raid1_rb_iter_first(&serial->serial_rb, lo, hi)) 713e148a32SGuoqing Jiang ret = -EBUSY; 72d0d2d8baSGuoqing Jiang else { 7369b00b5bSGuoqing Jiang si->start = lo; 7469b00b5bSGuoqing Jiang si->last = hi; 7569b00b5bSGuoqing Jiang raid1_rb_insert(si, &serial->serial_rb); 76d0d2d8baSGuoqing Jiang } 7769b00b5bSGuoqing Jiang spin_unlock_irqrestore(&serial->serial_lock, flags); 783e148a32SGuoqing Jiang 793e148a32SGuoqing Jiang return ret; 803e148a32SGuoqing Jiang } 813e148a32SGuoqing Jiang 82d0d2d8baSGuoqing Jiang static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio) 83d0d2d8baSGuoqing Jiang { 84d0d2d8baSGuoqing Jiang struct mddev *mddev = rdev->mddev; 85d0d2d8baSGuoqing Jiang struct serial_info *si; 86d0d2d8baSGuoqing Jiang int idx = sector_to_idx(r1_bio->sector); 87d0d2d8baSGuoqing Jiang struct serial_in_rdev *serial = &rdev->serial[idx]; 88d0d2d8baSGuoqing Jiang 89d0d2d8baSGuoqing Jiang if (WARN_ON(!mddev->serial_info_pool)) 90d0d2d8baSGuoqing Jiang return; 91d0d2d8baSGuoqing Jiang si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); 92d0d2d8baSGuoqing Jiang wait_event(serial->serial_io_wait, 93d0d2d8baSGuoqing Jiang check_and_add_serial(rdev, r1_bio, si, idx) == 0); 94d0d2d8baSGuoqing Jiang } 95d0d2d8baSGuoqing Jiang 96404659cfSGuoqing Jiang static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi) 973e148a32SGuoqing Jiang { 9869b00b5bSGuoqing Jiang struct serial_info *si; 993e148a32SGuoqing Jiang unsigned long flags; 1003e148a32SGuoqing Jiang int found = 0; 1013e148a32SGuoqing Jiang struct mddev *mddev = rdev->mddev; 102025471f9SGuoqing Jiang int idx = sector_to_idx(lo); 103025471f9SGuoqing Jiang struct serial_in_rdev *serial = &rdev->serial[idx]; 1043e148a32SGuoqing Jiang 10569b00b5bSGuoqing Jiang spin_lock_irqsave(&serial->serial_lock, flags); 10669b00b5bSGuoqing Jiang for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi); 10769b00b5bSGuoqing Jiang si; si = raid1_rb_iter_next(si, lo, hi)) { 10869b00b5bSGuoqing Jiang if (si->start == lo && si->last == hi) { 10969b00b5bSGuoqing Jiang raid1_rb_remove(si, &serial->serial_rb); 11069b00b5bSGuoqing Jiang mempool_free(si, mddev->serial_info_pool); 1113e148a32SGuoqing Jiang found = 1; 1123e148a32SGuoqing Jiang break; 1133e148a32SGuoqing Jiang } 11469b00b5bSGuoqing Jiang } 1153e148a32SGuoqing Jiang if (!found) 116404659cfSGuoqing Jiang WARN(1, "The write IO is not recorded for 
serialization\n"); 11769b00b5bSGuoqing Jiang spin_unlock_irqrestore(&serial->serial_lock, flags); 11869b00b5bSGuoqing Jiang wake_up(&serial->serial_io_wait); 1193e148a32SGuoqing Jiang } 1203e148a32SGuoqing Jiang 12198d30c58SMing Lei /* 12298d30c58SMing Lei * for resync bio, r1bio pointer can be retrieved from the per-bio 12398d30c58SMing Lei * 'struct resync_pages'. 12498d30c58SMing Lei */ 12598d30c58SMing Lei static inline struct r1bio *get_resync_r1bio(struct bio *bio) 12698d30c58SMing Lei { 12798d30c58SMing Lei return get_resync_pages(bio)->raid_bio; 12898d30c58SMing Lei } 12998d30c58SMing Lei 130dd0fc66fSAl Viro static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 1311da177e4SLinus Torvalds { 1321da177e4SLinus Torvalds struct pool_info *pi = data; 1339f2c9d12SNeilBrown int size = offsetof(struct r1bio, bios[pi->raid_disks]); 1341da177e4SLinus Torvalds 1351da177e4SLinus Torvalds /* allocate a r1bio with room for raid_disks entries in the bios array */ 1367eaceaccSJens Axboe return kzalloc(size, gfp_flags); 1371da177e4SLinus Torvalds } 1381da177e4SLinus Torvalds 1398e005f7cSmajianpeng #define RESYNC_DEPTH 32 1401da177e4SLinus Torvalds #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) 1418e005f7cSmajianpeng #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) 1428e005f7cSmajianpeng #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) 143c40f341fSGoldwyn Rodrigues #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) 144c40f341fSGoldwyn Rodrigues #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) 1451da177e4SLinus Torvalds 146dd0fc66fSAl Viro static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 1471da177e4SLinus Torvalds { 1481da177e4SLinus Torvalds struct pool_info *pi = data; 1499f2c9d12SNeilBrown struct r1bio *r1_bio; 1501da177e4SLinus Torvalds struct bio *bio; 151da1aab3dSNeilBrown int need_pages; 15298d30c58SMing Lei int j; 15398d30c58SMing Lei struct resync_pages *rps; 1541da177e4SLinus Torvalds 1551da177e4SLinus Torvalds r1_bio = r1bio_pool_alloc(gfp_flags, pi); 1567eaceaccSJens Axboe if (!r1_bio) 1571da177e4SLinus Torvalds return NULL; 1581da177e4SLinus Torvalds 1596da2ec56SKees Cook rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), 16098d30c58SMing Lei gfp_flags); 16198d30c58SMing Lei if (!rps) 16298d30c58SMing Lei goto out_free_r1bio; 16398d30c58SMing Lei 1641da177e4SLinus Torvalds /* 1651da177e4SLinus Torvalds * Allocate bios : 1 for reading, n-1 for writing 1661da177e4SLinus Torvalds */ 1671da177e4SLinus Torvalds for (j = pi->raid_disks ; j-- ; ) { 1686746557fSNeilBrown bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 1691da177e4SLinus Torvalds if (!bio) 1701da177e4SLinus Torvalds goto out_free_bio; 1711da177e4SLinus Torvalds r1_bio->bios[j] = bio; 1721da177e4SLinus Torvalds } 1731da177e4SLinus Torvalds /* 1741da177e4SLinus Torvalds * Allocate RESYNC_PAGES data pages and attach them to 175d11c171eSNeilBrown * the first bio. 176d11c171eSNeilBrown * If this is a user-requested check/repair, allocate 177d11c171eSNeilBrown * RESYNC_PAGES for each bio. 
1781da177e4SLinus Torvalds */ 179d11c171eSNeilBrown if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) 180da1aab3dSNeilBrown need_pages = pi->raid_disks; 181d11c171eSNeilBrown else 182da1aab3dSNeilBrown need_pages = 1; 18398d30c58SMing Lei for (j = 0; j < pi->raid_disks; j++) { 18498d30c58SMing Lei struct resync_pages *rp = &rps[j]; 1851da177e4SLinus Torvalds 18698d30c58SMing Lei bio = r1_bio->bios[j]; 18798d30c58SMing Lei 18898d30c58SMing Lei if (j < need_pages) { 18998d30c58SMing Lei if (resync_alloc_pages(rp, gfp_flags)) 190da1aab3dSNeilBrown goto out_free_pages; 19198d30c58SMing Lei } else { 19298d30c58SMing Lei memcpy(rp, &rps[0], sizeof(*rp)); 19398d30c58SMing Lei resync_get_all_pages(rp); 194d11c171eSNeilBrown } 19598d30c58SMing Lei 19698d30c58SMing Lei rp->raid_bio = r1_bio; 19798d30c58SMing Lei bio->bi_private = rp; 198d11c171eSNeilBrown } 1991da177e4SLinus Torvalds 2001da177e4SLinus Torvalds r1_bio->master_bio = NULL; 2011da177e4SLinus Torvalds 2021da177e4SLinus Torvalds return r1_bio; 2031da177e4SLinus Torvalds 204da1aab3dSNeilBrown out_free_pages: 205491221f8SGuoqing Jiang while (--j >= 0) 20698d30c58SMing Lei resync_free_pages(&rps[j]); 207da1aab3dSNeilBrown 2081da177e4SLinus Torvalds out_free_bio: 2091da177e4SLinus Torvalds while (++j < pi->raid_disks) 2101da177e4SLinus Torvalds bio_put(r1_bio->bios[j]); 21198d30c58SMing Lei kfree(rps); 21298d30c58SMing Lei 21398d30c58SMing Lei out_free_r1bio: 214c7afa803SMarcos Paulo de Souza rbio_pool_free(r1_bio, data); 2151da177e4SLinus Torvalds return NULL; 2161da177e4SLinus Torvalds } 2171da177e4SLinus Torvalds 2181da177e4SLinus Torvalds static void r1buf_pool_free(void *__r1_bio, void *data) 2191da177e4SLinus Torvalds { 2201da177e4SLinus Torvalds struct pool_info *pi = data; 22198d30c58SMing Lei int i; 2229f2c9d12SNeilBrown struct r1bio *r1bio = __r1_bio; 22398d30c58SMing Lei struct resync_pages *rp = NULL; 2241da177e4SLinus Torvalds 22598d30c58SMing Lei for (i = pi->raid_disks; i--; ) { 22698d30c58SMing Lei rp = get_resync_pages(r1bio->bios[i]); 22798d30c58SMing Lei resync_free_pages(rp); 2281da177e4SLinus Torvalds bio_put(r1bio->bios[i]); 22998d30c58SMing Lei } 23098d30c58SMing Lei 23198d30c58SMing Lei /* resync pages array stored in the 1st bio's .bi_private */ 23298d30c58SMing Lei kfree(rp); 2331da177e4SLinus Torvalds 234c7afa803SMarcos Paulo de Souza rbio_pool_free(r1bio, data); 2351da177e4SLinus Torvalds } 2361da177e4SLinus Torvalds 237e8096360SNeilBrown static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) 2381da177e4SLinus Torvalds { 2391da177e4SLinus Torvalds int i; 2401da177e4SLinus Torvalds 2418f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 2421da177e4SLinus Torvalds struct bio **bio = r1_bio->bios + i; 2434367af55SNeilBrown if (!BIO_SPECIAL(*bio)) 2441da177e4SLinus Torvalds bio_put(*bio); 2451da177e4SLinus Torvalds *bio = NULL; 2461da177e4SLinus Torvalds } 2471da177e4SLinus Torvalds } 2481da177e4SLinus Torvalds 2499f2c9d12SNeilBrown static void free_r1bio(struct r1bio *r1_bio) 2501da177e4SLinus Torvalds { 251e8096360SNeilBrown struct r1conf *conf = r1_bio->mddev->private; 2521da177e4SLinus Torvalds 2531da177e4SLinus Torvalds put_all_bios(conf, r1_bio); 254afeee514SKent Overstreet mempool_free(r1_bio, &conf->r1bio_pool); 2551da177e4SLinus Torvalds } 2561da177e4SLinus Torvalds 2579f2c9d12SNeilBrown static void put_buf(struct r1bio *r1_bio) 2581da177e4SLinus Torvalds { 259e8096360SNeilBrown struct r1conf *conf = r1_bio->mddev->private; 260af5f42a7SShaohua Li sector_t sect = r1_bio->sector; 
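	/*
	 * The sector is sampled into 'sect' up front because the
	 * mempool_free() below hands the r1_bio back to the pool;
	 * lower_barrier() still needs the sector afterwards to find
	 * the right barrier bucket.
	 */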
2613e198f78SNeilBrown int i; 2623e198f78SNeilBrown 2638f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 2643e198f78SNeilBrown struct bio *bio = r1_bio->bios[i]; 2653e198f78SNeilBrown if (bio->bi_end_io) 2663e198f78SNeilBrown rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); 2673e198f78SNeilBrown } 2681da177e4SLinus Torvalds 269afeee514SKent Overstreet mempool_free(r1_bio, &conf->r1buf_pool); 2701da177e4SLinus Torvalds 271af5f42a7SShaohua Li lower_barrier(conf, sect); 2721da177e4SLinus Torvalds } 2731da177e4SLinus Torvalds 2749f2c9d12SNeilBrown static void reschedule_retry(struct r1bio *r1_bio) 2751da177e4SLinus Torvalds { 2761da177e4SLinus Torvalds unsigned long flags; 277fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 278e8096360SNeilBrown struct r1conf *conf = mddev->private; 279fd76863eScolyli@suse.de int idx; 2801da177e4SLinus Torvalds 281fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector); 2821da177e4SLinus Torvalds spin_lock_irqsave(&conf->device_lock, flags); 2831da177e4SLinus Torvalds list_add(&r1_bio->retry_list, &conf->retry_list); 284824e47daScolyli@suse.de atomic_inc(&conf->nr_queued[idx]); 2851da177e4SLinus Torvalds spin_unlock_irqrestore(&conf->device_lock, flags); 2861da177e4SLinus Torvalds 28717999be4SNeilBrown wake_up(&conf->wait_barrier); 2881da177e4SLinus Torvalds md_wakeup_thread(mddev->thread); 2891da177e4SLinus Torvalds } 2901da177e4SLinus Torvalds 2911da177e4SLinus Torvalds /* 2921da177e4SLinus Torvalds * raid_end_bio_io() is called when we have finished servicing a mirrored 2931da177e4SLinus Torvalds * operation and are ready to return a success/failure code to the buffer 2941da177e4SLinus Torvalds * cache layer. 2951da177e4SLinus Torvalds */ 2969f2c9d12SNeilBrown static void call_bio_endio(struct r1bio *r1_bio) 297d2eb35acSNeilBrown { 298d2eb35acSNeilBrown struct bio *bio = r1_bio->master_bio; 299d2eb35acSNeilBrown 300d2eb35acSNeilBrown if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 3014e4cbee9SChristoph Hellwig bio->bi_status = BLK_STS_IOERR; 3024246a0b6SChristoph Hellwig 3034246a0b6SChristoph Hellwig bio_endio(bio); 304d2eb35acSNeilBrown } 305d2eb35acSNeilBrown 3069f2c9d12SNeilBrown static void raid_end_bio_io(struct r1bio *r1_bio) 3071da177e4SLinus Torvalds { 3081da177e4SLinus Torvalds struct bio *bio = r1_bio->master_bio; 309c91114c2SDavid Jeffery struct r1conf *conf = r1_bio->mddev->private; 3101da177e4SLinus Torvalds 3114b6d287fSNeilBrown /* if nobody has done the final endio yet, do it now */ 3124b6d287fSNeilBrown if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 31336a4e1feSNeilBrown pr_debug("raid1: sync end %s on sectors %llu-%llu\n", 3144b6d287fSNeilBrown (bio_data_dir(bio) == WRITE) ? "write" : "read", 3154f024f37SKent Overstreet (unsigned long long) bio->bi_iter.bi_sector, 3164f024f37SKent Overstreet (unsigned long long) bio_end_sector(bio) - 1); 3174b6d287fSNeilBrown 318d2eb35acSNeilBrown call_bio_endio(r1_bio); 3194b6d287fSNeilBrown } 320c91114c2SDavid Jeffery /* 321c91114c2SDavid Jeffery * Wake up any possible resync thread that waits for the device 322c91114c2SDavid Jeffery * to go idle. All I/Os, even write-behind writes, are done. 323c91114c2SDavid Jeffery */ 324c91114c2SDavid Jeffery allow_barrier(conf, r1_bio->sector); 325c91114c2SDavid Jeffery 3261da177e4SLinus Torvalds free_r1bio(r1_bio); 3271da177e4SLinus Torvalds } 3281da177e4SLinus Torvalds 3291da177e4SLinus Torvalds /* 3301da177e4SLinus Torvalds * Update disk head position estimator based on IRQ completion info. 
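 *
 * For example (illustrative numbers): a completed 8-sector I/O at
 * sector 1000 leaves head_position == 1008, which read_balance()
 * later compares against the next request's sector to detect
 * sequential access.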
3311da177e4SLinus Torvalds */ 3329f2c9d12SNeilBrown static inline void update_head_pos(int disk, struct r1bio *r1_bio) 3331da177e4SLinus Torvalds { 334e8096360SNeilBrown struct r1conf *conf = r1_bio->mddev->private; 3351da177e4SLinus Torvalds 3361da177e4SLinus Torvalds conf->mirrors[disk].head_position = 3371da177e4SLinus Torvalds r1_bio->sector + (r1_bio->sectors); 3381da177e4SLinus Torvalds } 3391da177e4SLinus Torvalds 340ba3ae3beSNamhyung Kim /* 341ba3ae3beSNamhyung Kim * Find the disk number which triggered given bio 342ba3ae3beSNamhyung Kim */ 3439f2c9d12SNeilBrown static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) 344ba3ae3beSNamhyung Kim { 345ba3ae3beSNamhyung Kim int mirror; 34630194636SNeilBrown struct r1conf *conf = r1_bio->mddev->private; 34730194636SNeilBrown int raid_disks = conf->raid_disks; 348ba3ae3beSNamhyung Kim 3498f19ccb2SNeilBrown for (mirror = 0; mirror < raid_disks * 2; mirror++) 350ba3ae3beSNamhyung Kim if (r1_bio->bios[mirror] == bio) 351ba3ae3beSNamhyung Kim break; 352ba3ae3beSNamhyung Kim 3538f19ccb2SNeilBrown BUG_ON(mirror == raid_disks * 2); 354ba3ae3beSNamhyung Kim update_head_pos(mirror, r1_bio); 355ba3ae3beSNamhyung Kim 356ba3ae3beSNamhyung Kim return mirror; 357ba3ae3beSNamhyung Kim } 358ba3ae3beSNamhyung Kim 3594246a0b6SChristoph Hellwig static void raid1_end_read_request(struct bio *bio) 3601da177e4SLinus Torvalds { 3614e4cbee9SChristoph Hellwig int uptodate = !bio->bi_status; 3629f2c9d12SNeilBrown struct r1bio *r1_bio = bio->bi_private; 363e8096360SNeilBrown struct r1conf *conf = r1_bio->mddev->private; 364e5872d58SNeilBrown struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; 3651da177e4SLinus Torvalds 3661da177e4SLinus Torvalds /* 3671da177e4SLinus Torvalds * this branch is our 'one mirror IO has finished' event handler: 3681da177e4SLinus Torvalds */ 369e5872d58SNeilBrown update_head_pos(r1_bio->read_disk, r1_bio); 370ddaf22abSNeilBrown 371220946c9SNeilBrown if (uptodate) 3721da177e4SLinus Torvalds set_bit(R1BIO_Uptodate, &r1_bio->state); 3732e52d449SNeilBrown else if (test_bit(FailFast, &rdev->flags) && 3742e52d449SNeilBrown test_bit(R1BIO_FailFast, &r1_bio->state)) 3752e52d449SNeilBrown /* This was a fail-fast read so we definitely 3762e52d449SNeilBrown * want to retry */ 3772e52d449SNeilBrown ; 378dd00a99eSNeilBrown else { 379dd00a99eSNeilBrown /* If all other devices have failed, we want to return 380dd00a99eSNeilBrown * the error upwards rather than fail the last device. 
381dd00a99eSNeilBrown * Here we redefine "uptodate" to mean "Don't want to retry" 382dd00a99eSNeilBrown */ 383dd00a99eSNeilBrown unsigned long flags; 384dd00a99eSNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 385dd00a99eSNeilBrown if (r1_bio->mddev->degraded == conf->raid_disks || 386dd00a99eSNeilBrown (r1_bio->mddev->degraded == conf->raid_disks-1 && 387e5872d58SNeilBrown test_bit(In_sync, &rdev->flags))) 388dd00a99eSNeilBrown uptodate = 1; 389dd00a99eSNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 390dd00a99eSNeilBrown } 3911da177e4SLinus Torvalds 3927ad4d4a6SNeilBrown if (uptodate) { 3931da177e4SLinus Torvalds raid_end_bio_io(r1_bio); 394e5872d58SNeilBrown rdev_dec_pending(rdev, conf->mddev); 3957ad4d4a6SNeilBrown } else { 3961da177e4SLinus Torvalds /* 3971da177e4SLinus Torvalds * oops, read error: 3981da177e4SLinus Torvalds */ 3991da177e4SLinus Torvalds char b[BDEVNAME_SIZE]; 4001d41c216SNeilBrown pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n", 4019dd1e2faSNeilBrown mdname(conf->mddev), 4021d41c216SNeilBrown bdevname(rdev->bdev, b), 4038bda470eSChristian Dietrich (unsigned long long)r1_bio->sector); 404d2eb35acSNeilBrown set_bit(R1BIO_ReadError, &r1_bio->state); 4051da177e4SLinus Torvalds reschedule_retry(r1_bio); 4067ad4d4a6SNeilBrown /* don't drop the reference on read_disk yet */ 4071da177e4SLinus Torvalds } 4081da177e4SLinus Torvalds } 4091da177e4SLinus Torvalds 4109f2c9d12SNeilBrown static void close_write(struct r1bio *r1_bio) 4114e78064fSNeilBrown { 4124e78064fSNeilBrown /* it really is the end of this request */ 4134e78064fSNeilBrown if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 414841c1316SMing Lei bio_free_pages(r1_bio->behind_master_bio); 415841c1316SMing Lei bio_put(r1_bio->behind_master_bio); 416841c1316SMing Lei r1_bio->behind_master_bio = NULL; 4174e78064fSNeilBrown } 4184e78064fSNeilBrown /* clear the bitmap if all writes complete successfully */ 419e64e4018SAndy Shevchenko md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 4204e78064fSNeilBrown r1_bio->sectors, 4214e78064fSNeilBrown !test_bit(R1BIO_Degraded, &r1_bio->state), 422af6d7b76SNeilBrown test_bit(R1BIO_BehindIO, &r1_bio->state)); 4234e78064fSNeilBrown md_write_end(r1_bio->mddev); 424cd5ff9a1SNeilBrown } 425cd5ff9a1SNeilBrown 4269f2c9d12SNeilBrown static void r1_bio_write_done(struct r1bio *r1_bio) 427cd5ff9a1SNeilBrown { 428cd5ff9a1SNeilBrown if (!atomic_dec_and_test(&r1_bio->remaining)) 429cd5ff9a1SNeilBrown return; 430cd5ff9a1SNeilBrown 431cd5ff9a1SNeilBrown if (test_bit(R1BIO_WriteError, &r1_bio->state)) 432cd5ff9a1SNeilBrown reschedule_retry(r1_bio); 433cd5ff9a1SNeilBrown else { 434cd5ff9a1SNeilBrown close_write(r1_bio); 4354367af55SNeilBrown if (test_bit(R1BIO_MadeGood, &r1_bio->state)) 4364367af55SNeilBrown reschedule_retry(r1_bio); 4374367af55SNeilBrown else 4384e78064fSNeilBrown raid_end_bio_io(r1_bio); 4394e78064fSNeilBrown } 4404e78064fSNeilBrown } 4414e78064fSNeilBrown 4424246a0b6SChristoph Hellwig static void raid1_end_write_request(struct bio *bio) 4431da177e4SLinus Torvalds { 4449f2c9d12SNeilBrown struct r1bio *r1_bio = bio->bi_private; 445e5872d58SNeilBrown int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); 446e8096360SNeilBrown struct r1conf *conf = r1_bio->mddev->private; 44704b857f7SNeilBrown struct bio *to_put = NULL; 448e5872d58SNeilBrown int mirror = find_bio_disk(r1_bio, bio); 449e5872d58SNeilBrown struct md_rdev *rdev = conf->mirrors[mirror].rdev; 450e3f948cdSShaohua Li bool discard_error; 45169df9cfcSGuoqing Jiang sector_t lo = 
r1_bio->sector;
45269df9cfcSGuoqing Jiang sector_t hi = r1_bio->sector + r1_bio->sectors;
453e3f948cdSShaohua Li
4544e4cbee9SChristoph Hellwig discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
4551da177e4SLinus Torvalds
4561da177e4SLinus Torvalds /*
457e9c7469bSTejun Heo * 'one mirror IO has finished' event handler:
4581da177e4SLinus Torvalds */
4594e4cbee9SChristoph Hellwig if (bio->bi_status && !discard_error) {
460e5872d58SNeilBrown set_bit(WriteErrorSeen, &rdev->flags);
461e5872d58SNeilBrown if (!test_and_set_bit(WantReplacement, &rdev->flags))
46219d67169SNeilBrown set_bit(MD_RECOVERY_NEEDED, &
46319d67169SNeilBrown conf->mddev->recovery);
46419d67169SNeilBrown
465212e7eb7SNeilBrown if (test_bit(FailFast, &rdev->flags) &&
466212e7eb7SNeilBrown (bio->bi_opf & MD_FAILFAST) &&
467212e7eb7SNeilBrown /* We never try FailFast to WriteMostly devices */
468212e7eb7SNeilBrown !test_bit(WriteMostly, &rdev->flags)) {
469212e7eb7SNeilBrown md_error(r1_bio->mddev, rdev);
470eeba6809SYufen Yu }
471eeba6809SYufen Yu
472eeba6809SYufen Yu /*
473eeba6809SYufen Yu * When the device is faulty, it is not necessary to
474eeba6809SYufen Yu * handle the write error.
475eeba6809SYufen Yu * For failfast, this is the only remaining device,
476eeba6809SYufen Yu * so we need to retry the write without FailFast.
477212e7eb7SNeilBrown */
478eeba6809SYufen Yu if (!test_bit(Faulty, &rdev->flags))
479212e7eb7SNeilBrown set_bit(R1BIO_WriteError, &r1_bio->state);
480212e7eb7SNeilBrown else {
481212e7eb7SNeilBrown /* Finished with this branch */
482212e7eb7SNeilBrown r1_bio->bios[mirror] = NULL;
483212e7eb7SNeilBrown to_put = bio;
484212e7eb7SNeilBrown }
4854367af55SNeilBrown } else {
4861da177e4SLinus Torvalds /*
487e9c7469bSTejun Heo * Set R1BIO_Uptodate in our master bio, so that we
488e9c7469bSTejun Heo * will return a good error code to the higher
489e9c7469bSTejun Heo * levels even if IO on some other mirrored buffer
490e9c7469bSTejun Heo * fails.
4911da177e4SLinus Torvalds *
492e9c7469bSTejun Heo * The 'master' represents the composite IO operation
493e9c7469bSTejun Heo * to user-side. So if something waits for IO, then it
494e9c7469bSTejun Heo * will wait for the 'master' bio.
4951da177e4SLinus Torvalds */
4964367af55SNeilBrown sector_t first_bad;
4974367af55SNeilBrown int bad_sectors;
4984367af55SNeilBrown
499cd5ff9a1SNeilBrown r1_bio->bios[mirror] = NULL;
500cd5ff9a1SNeilBrown to_put = bio;
5013056e3aeSAlex Lyakas /*
5023056e3aeSAlex Lyakas * Do not set R1BIO_Uptodate if the current device is
5033056e3aeSAlex Lyakas * rebuilding or Faulty. This is because we cannot use
5043056e3aeSAlex Lyakas * such a device for properly reading the data back (we could
5053056e3aeSAlex Lyakas * potentially use it, if the current write would have fallen
5063056e3aeSAlex Lyakas * before rdev->recovery_offset, but for simplicity we don't
5073056e3aeSAlex Lyakas * check this here).
5083056e3aeSAlex Lyakas */
509e5872d58SNeilBrown if (test_bit(In_sync, &rdev->flags) &&
510e5872d58SNeilBrown !test_bit(Faulty, &rdev->flags))
5111da177e4SLinus Torvalds set_bit(R1BIO_Uptodate, &r1_bio->state);
5121da177e4SLinus Torvalds
5134367af55SNeilBrown /* Maybe we can clear some bad blocks.
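 * If this successful write overlaps a range previously recorded as
 * bad, mark the bio IO_MADE_GOOD so that raid1d can later clear
 * those sectors from the bad-block list.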
*/ 514e5872d58SNeilBrown if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 515e3f948cdSShaohua Li &first_bad, &bad_sectors) && !discard_error) { 5164367af55SNeilBrown r1_bio->bios[mirror] = IO_MADE_GOOD; 5174367af55SNeilBrown set_bit(R1BIO_MadeGood, &r1_bio->state); 5184367af55SNeilBrown } 5194367af55SNeilBrown } 5204367af55SNeilBrown 5214b6d287fSNeilBrown if (behind) { 52269df9cfcSGuoqing Jiang if (test_bit(CollisionCheck, &rdev->flags)) 523404659cfSGuoqing Jiang remove_serial(rdev, lo, hi); 524e5872d58SNeilBrown if (test_bit(WriteMostly, &rdev->flags)) 5254b6d287fSNeilBrown atomic_dec(&r1_bio->behind_remaining); 5264b6d287fSNeilBrown 527e9c7469bSTejun Heo /* 528e9c7469bSTejun Heo * In behind mode, we ACK the master bio once the I/O 529e9c7469bSTejun Heo * has safely reached all non-writemostly 530e9c7469bSTejun Heo * disks. Setting the Returned bit ensures that this 531e9c7469bSTejun Heo * gets done only once -- we don't ever want to return 532e9c7469bSTejun Heo * -EIO here, instead we'll wait 533e9c7469bSTejun Heo */ 5344b6d287fSNeilBrown if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && 5354b6d287fSNeilBrown test_bit(R1BIO_Uptodate, &r1_bio->state)) { 5364b6d287fSNeilBrown /* Maybe we can return now */ 5374b6d287fSNeilBrown if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 5384b6d287fSNeilBrown struct bio *mbio = r1_bio->master_bio; 53936a4e1feSNeilBrown pr_debug("raid1: behind end write sectors" 54036a4e1feSNeilBrown " %llu-%llu\n", 5414f024f37SKent Overstreet (unsigned long long) mbio->bi_iter.bi_sector, 5424f024f37SKent Overstreet (unsigned long long) bio_end_sector(mbio) - 1); 543d2eb35acSNeilBrown call_bio_endio(r1_bio); 5444b6d287fSNeilBrown } 5454b6d287fSNeilBrown } 54669df9cfcSGuoqing Jiang } else if (rdev->mddev->serialize_policy) 54769df9cfcSGuoqing Jiang remove_serial(rdev, lo, hi); 5484367af55SNeilBrown if (r1_bio->bios[mirror] == NULL) 549e5872d58SNeilBrown rdev_dec_pending(rdev, conf->mddev); 550e9c7469bSTejun Heo 5511da177e4SLinus Torvalds /* 5521da177e4SLinus Torvalds * Let's see if all mirrored write operations have finished 5531da177e4SLinus Torvalds * already. 5541da177e4SLinus Torvalds */ 555af6d7b76SNeilBrown r1_bio_write_done(r1_bio); 556c70810b3SNeilBrown 55704b857f7SNeilBrown if (to_put) 55804b857f7SNeilBrown bio_put(to_put); 5591da177e4SLinus Torvalds } 5601da177e4SLinus Torvalds 561fd76863eScolyli@suse.de static sector_t align_to_barrier_unit_end(sector_t start_sector, 562fd76863eScolyli@suse.de sector_t sectors) 563fd76863eScolyli@suse.de { 564fd76863eScolyli@suse.de sector_t len; 565fd76863eScolyli@suse.de 566fd76863eScolyli@suse.de WARN_ON(sectors == 0); 567fd76863eScolyli@suse.de /* 568fd76863eScolyli@suse.de * len is the number of sectors from start_sector to end of the 569fd76863eScolyli@suse.de * barrier unit which start_sector belongs to. 570fd76863eScolyli@suse.de */ 571fd76863eScolyli@suse.de len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) - 572fd76863eScolyli@suse.de start_sector; 573fd76863eScolyli@suse.de 574fd76863eScolyli@suse.de if (len > sectors) 575fd76863eScolyli@suse.de len = sectors; 576fd76863eScolyli@suse.de 577fd76863eScolyli@suse.de return len; 578fd76863eScolyli@suse.de } 579fd76863eScolyli@suse.de 5801da177e4SLinus Torvalds /* 5811da177e4SLinus Torvalds * This routine returns the disk from which the requested read should 5821da177e4SLinus Torvalds * be done. 
There is a per-array 'next expected sequential IO' sector
5831da177e4SLinus Torvalds * number - if this matches on the next IO then we use the last disk.
5841da177e4SLinus Torvalds * There is also a per-disk 'last known head position' sector that is
5851da177e4SLinus Torvalds * maintained from IRQ contexts; both the normal and the resync IO
5861da177e4SLinus Torvalds * completion handlers update this position correctly. If there is no
5871da177e4SLinus Torvalds * perfect sequential match then we pick the disk whose head is closest.
5881da177e4SLinus Torvalds *
5891da177e4SLinus Torvalds * If there are 2 mirrors in the same 2 devices, performance degrades
5901da177e4SLinus Torvalds * because position is mirror, not device based.
5911da177e4SLinus Torvalds *
5921da177e4SLinus Torvalds * The rdev for the device selected will have nr_pending incremented.
5931da177e4SLinus Torvalds */
594e8096360SNeilBrown static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
5951da177e4SLinus Torvalds {
596af3a2cd6SNeilBrown const sector_t this_sector = r1_bio->sector;
597d2eb35acSNeilBrown int sectors;
598d2eb35acSNeilBrown int best_good_sectors;
5999dedf603SShaohua Li int best_disk, best_dist_disk, best_pending_disk;
6009dedf603SShaohua Li int has_nonrot_disk;
601be4d3280SShaohua Li int disk;
60276073054SNeilBrown sector_t best_dist;
6039dedf603SShaohua Li unsigned int min_pending;
6043cb03002SNeilBrown struct md_rdev *rdev;
605f3ac8bf7SNeilBrown int choose_first;
60612cee5a8SShaohua Li int choose_next_idle;
6071da177e4SLinus Torvalds
6081da177e4SLinus Torvalds rcu_read_lock();
6091da177e4SLinus Torvalds /*
6101da177e4SLinus Torvalds * Check if we can balance. We can balance on the whole
6111da177e4SLinus Torvalds * device if no resync is going on, or below the resync window.
6121da177e4SLinus Torvalds * We take the first readable disk when above the resync window.
6131da177e4SLinus Torvalds */ 6141da177e4SLinus Torvalds retry: 615d2eb35acSNeilBrown sectors = r1_bio->sectors; 61676073054SNeilBrown best_disk = -1; 6179dedf603SShaohua Li best_dist_disk = -1; 61876073054SNeilBrown best_dist = MaxSector; 6199dedf603SShaohua Li best_pending_disk = -1; 6209dedf603SShaohua Li min_pending = UINT_MAX; 621d2eb35acSNeilBrown best_good_sectors = 0; 6229dedf603SShaohua Li has_nonrot_disk = 0; 62312cee5a8SShaohua Li choose_next_idle = 0; 6242e52d449SNeilBrown clear_bit(R1BIO_FailFast, &r1_bio->state); 625d2eb35acSNeilBrown 6267d49ffcfSGoldwyn Rodrigues if ((conf->mddev->recovery_cp < this_sector + sectors) || 6277d49ffcfSGoldwyn Rodrigues (mddev_is_clustered(conf->mddev) && 62890382ed9SGoldwyn Rodrigues md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, 6297d49ffcfSGoldwyn Rodrigues this_sector + sectors))) 6307d49ffcfSGoldwyn Rodrigues choose_first = 1; 6317d49ffcfSGoldwyn Rodrigues else 6327d49ffcfSGoldwyn Rodrigues choose_first = 0; 6331da177e4SLinus Torvalds 634be4d3280SShaohua Li for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { 63576073054SNeilBrown sector_t dist; 636d2eb35acSNeilBrown sector_t first_bad; 637d2eb35acSNeilBrown int bad_sectors; 6389dedf603SShaohua Li unsigned int pending; 63912cee5a8SShaohua Li bool nonrot; 640d2eb35acSNeilBrown 641f3ac8bf7SNeilBrown rdev = rcu_dereference(conf->mirrors[disk].rdev); 642f3ac8bf7SNeilBrown if (r1_bio->bios[disk] == IO_BLOCKED 643f3ac8bf7SNeilBrown || rdev == NULL 64476073054SNeilBrown || test_bit(Faulty, &rdev->flags)) 645f3ac8bf7SNeilBrown continue; 64676073054SNeilBrown if (!test_bit(In_sync, &rdev->flags) && 64776073054SNeilBrown rdev->recovery_offset < this_sector + sectors) 64876073054SNeilBrown continue; 64976073054SNeilBrown if (test_bit(WriteMostly, &rdev->flags)) { 65076073054SNeilBrown /* Don't balance among write-mostly, just 65176073054SNeilBrown * use the first as a last resort */ 652d1901ef0STomáš Hodek if (best_dist_disk < 0) { 653307729c8SNeilBrown if (is_badblock(rdev, this_sector, sectors, 654307729c8SNeilBrown &first_bad, &bad_sectors)) { 655816b0acfSWei Fang if (first_bad <= this_sector) 656307729c8SNeilBrown /* Cannot use this */ 657307729c8SNeilBrown continue; 658307729c8SNeilBrown best_good_sectors = first_bad - this_sector; 659307729c8SNeilBrown } else 660307729c8SNeilBrown best_good_sectors = sectors; 661d1901ef0STomáš Hodek best_dist_disk = disk; 662d1901ef0STomáš Hodek best_pending_disk = disk; 663307729c8SNeilBrown } 66476073054SNeilBrown continue; 6658ddf9efeSNeilBrown } 66676073054SNeilBrown /* This is a reasonable device to use. It might 66776073054SNeilBrown * even be best. 6681da177e4SLinus Torvalds */ 669d2eb35acSNeilBrown if (is_badblock(rdev, this_sector, sectors, 670d2eb35acSNeilBrown &first_bad, &bad_sectors)) { 671d2eb35acSNeilBrown if (best_dist < MaxSector) 672d2eb35acSNeilBrown /* already have a better device */ 673d2eb35acSNeilBrown continue; 674d2eb35acSNeilBrown if (first_bad <= this_sector) { 675d2eb35acSNeilBrown /* cannot read here. If this is the 'primary' 676d2eb35acSNeilBrown * device, then we must not read beyond 677d2eb35acSNeilBrown * bad_sectors from another device.. 
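 * (This is why 'sectors' is clipped to bad_sectors just below
 * when choose_first is set.)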
678d2eb35acSNeilBrown */
679d2eb35acSNeilBrown bad_sectors -= (this_sector - first_bad);
680d2eb35acSNeilBrown if (choose_first && sectors > bad_sectors)
681d2eb35acSNeilBrown sectors = bad_sectors;
682d2eb35acSNeilBrown if (best_good_sectors > sectors)
683d2eb35acSNeilBrown best_good_sectors = sectors;
684d2eb35acSNeilBrown
685d2eb35acSNeilBrown } else {
686d2eb35acSNeilBrown sector_t good_sectors = first_bad - this_sector;
687d2eb35acSNeilBrown if (good_sectors > best_good_sectors) {
688d2eb35acSNeilBrown best_good_sectors = good_sectors;
689d2eb35acSNeilBrown best_disk = disk;
690d2eb35acSNeilBrown }
691d2eb35acSNeilBrown if (choose_first)
692d2eb35acSNeilBrown break;
693d2eb35acSNeilBrown }
694d2eb35acSNeilBrown continue;
695d82dd0e3STomasz Majchrzak } else {
696d82dd0e3STomasz Majchrzak if ((sectors > best_good_sectors) && (best_disk >= 0))
697d82dd0e3STomasz Majchrzak best_disk = -1;
698d2eb35acSNeilBrown best_good_sectors = sectors;
699d82dd0e3STomasz Majchrzak }
700d2eb35acSNeilBrown
7012e52d449SNeilBrown if (best_disk >= 0)
7022e52d449SNeilBrown /* At least two disks to choose from so failfast is OK */
7032e52d449SNeilBrown set_bit(R1BIO_FailFast, &r1_bio->state);
7042e52d449SNeilBrown
70512cee5a8SShaohua Li nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
70612cee5a8SShaohua Li has_nonrot_disk |= nonrot;
7079dedf603SShaohua Li pending = atomic_read(&rdev->nr_pending);
70876073054SNeilBrown dist = abs(this_sector - conf->mirrors[disk].head_position);
70912cee5a8SShaohua Li if (choose_first) {
71076073054SNeilBrown best_disk = disk;
7111da177e4SLinus Torvalds break;
7121da177e4SLinus Torvalds }
71312cee5a8SShaohua Li /* Don't change to another disk for sequential reads */
71412cee5a8SShaohua Li if (conf->mirrors[disk].next_seq_sect == this_sector
71512cee5a8SShaohua Li || dist == 0) {
71612cee5a8SShaohua Li int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
71712cee5a8SShaohua Li struct raid1_info *mirror = &conf->mirrors[disk];
71812cee5a8SShaohua Li
71912cee5a8SShaohua Li best_disk = disk;
72012cee5a8SShaohua Li /*
72112cee5a8SShaohua Li * If the buffered sequential IO size exceeds the optimal
72212cee5a8SShaohua Li * iosize, check whether there is an idle disk. If yes, choose
72312cee5a8SShaohua Li * the idle disk. read_balance could already have chosen an
72412cee5a8SShaohua Li * idle disk before noticing it's a sequential IO on
72512cee5a8SShaohua Li * this disk. That doesn't matter because this disk
72612cee5a8SShaohua Li * will go idle and be picked next time, once the
72712cee5a8SShaohua Li * first disk's IO size exceeds the optimal iosize. In
72812cee5a8SShaohua Li * this way, the iosize of the first disk will be at least
72912cee5a8SShaohua Li * the optimal iosize. The iosize of the second disk might be
73012cee5a8SShaohua Li * small, but that's not a big deal since when the second disk
73112cee5a8SShaohua Li * starts IO, the first disk is likely still busy.
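 *
 * For example (illustrative numbers): with opt_iosize == 256
 * sectors, a stream that has already advanced 256 or more
 * sectors past seq_start on this disk is moved to an idle
 * disk if one exists.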
73212cee5a8SShaohua Li */
73312cee5a8SShaohua Li if (nonrot && opt_iosize > 0 &&
73412cee5a8SShaohua Li mirror->seq_start != MaxSector &&
73512cee5a8SShaohua Li mirror->next_seq_sect > opt_iosize &&
73612cee5a8SShaohua Li mirror->next_seq_sect - opt_iosize >=
73712cee5a8SShaohua Li mirror->seq_start) {
73812cee5a8SShaohua Li choose_next_idle = 1;
73912cee5a8SShaohua Li continue;
74012cee5a8SShaohua Li }
74112cee5a8SShaohua Li break;
74212cee5a8SShaohua Li }
74312cee5a8SShaohua Li
74412cee5a8SShaohua Li if (choose_next_idle)
74512cee5a8SShaohua Li continue;
7469dedf603SShaohua Li
7479dedf603SShaohua Li if (min_pending > pending) {
7489dedf603SShaohua Li min_pending = pending;
7499dedf603SShaohua Li best_pending_disk = disk;
7509dedf603SShaohua Li }
7519dedf603SShaohua Li
75276073054SNeilBrown if (dist < best_dist) {
75376073054SNeilBrown best_dist = dist;
7549dedf603SShaohua Li best_dist_disk = disk;
7551da177e4SLinus Torvalds }
756f3ac8bf7SNeilBrown }
7571da177e4SLinus Torvalds
7589dedf603SShaohua Li /*
7599dedf603SShaohua Li * If all disks are rotational, choose the closest disk. If any disk is
7609dedf603SShaohua Li * non-rotational, choose the disk with the fewest pending requests even
7619dedf603SShaohua Li * if that disk is rotational, which may or may not be optimal for arrays
7629dedf603SShaohua Li * with mixed rotational/non-rotational disks depending on the workload.
7639dedf603SShaohua Li */
7649dedf603SShaohua Li if (best_disk == -1) {
7652e52d449SNeilBrown if (has_nonrot_disk || min_pending == 0)
7669dedf603SShaohua Li best_disk = best_pending_disk;
7679dedf603SShaohua Li else
7689dedf603SShaohua Li best_disk = best_dist_disk;
7699dedf603SShaohua Li }
7709dedf603SShaohua Li
77176073054SNeilBrown if (best_disk >= 0) {
77276073054SNeilBrown rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
7738ddf9efeSNeilBrown if (!rdev)
7748ddf9efeSNeilBrown goto retry;
7758ddf9efeSNeilBrown atomic_inc(&rdev->nr_pending);
776d2eb35acSNeilBrown sectors = best_good_sectors;
77712cee5a8SShaohua Li
77812cee5a8SShaohua Li if (conf->mirrors[best_disk].next_seq_sect != this_sector)
77912cee5a8SShaohua Li conf->mirrors[best_disk].seq_start = this_sector;
78012cee5a8SShaohua Li
781be4d3280SShaohua Li conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
7821da177e4SLinus Torvalds }
7831da177e4SLinus Torvalds rcu_read_unlock();
784d2eb35acSNeilBrown *max_sectors = sectors;
7851da177e4SLinus Torvalds
78676073054SNeilBrown return best_disk;
7871da177e4SLinus Torvalds }
7881da177e4SLinus Torvalds
789673ca68dSNeilBrown static void flush_bio_list(struct r1conf *conf, struct bio *bio)
790a35e63efSNeilBrown {
791673ca68dSNeilBrown /* flush any pending bitmap writes to disk before proceeding w/ I/O */
792e64e4018SAndy Shevchenko md_bitmap_unplug(conf->mddev->bitmap);
79334db0cd6SNeilBrown wake_up(&conf->wait_barrier);
794a35e63efSNeilBrown
795a35e63efSNeilBrown while (bio) { /* submit pending writes */
796a35e63efSNeilBrown struct bio *next = bio->bi_next;
797309dca30SChristoph Hellwig struct md_rdev *rdev = (void *)bio->bi_bdev;
798a35e63efSNeilBrown bio->bi_next = NULL;
79974d46992SChristoph Hellwig bio_set_dev(bio, rdev->bdev);
8005e2c7a36SNeilBrown if (test_bit(Faulty, &rdev->flags)) {
8016308d8e3SGuoqing Jiang bio_io_error(bio);
8025e2c7a36SNeilBrown } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
803309dca30SChristoph Hellwig !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
8042ff8cc2cSShaohua Li /* Just ignore it */
8054246a0b6SChristoph Hellwig bio_endio(bio);
8062ff8cc2cSShaohua Li else
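/* device is healthy and the op is supported: hand the bio down the stack */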
807ed00aabdSChristoph Hellwig submit_bio_noacct(bio);
808a35e63efSNeilBrown bio = next;
8095fa4f8baSHannes Reinecke cond_resched();
810a35e63efSNeilBrown }
811673ca68dSNeilBrown }
812673ca68dSNeilBrown
813673ca68dSNeilBrown static void flush_pending_writes(struct r1conf *conf)
814673ca68dSNeilBrown {
815673ca68dSNeilBrown /* Any writes that have been queued but are awaiting
816673ca68dSNeilBrown * bitmap updates get flushed here.
817673ca68dSNeilBrown */
818673ca68dSNeilBrown spin_lock_irq(&conf->device_lock);
819673ca68dSNeilBrown
820673ca68dSNeilBrown if (conf->pending_bio_list.head) {
82118022a1bSShaohua Li struct blk_plug plug;
822673ca68dSNeilBrown struct bio *bio;
82318022a1bSShaohua Li
824673ca68dSNeilBrown bio = bio_list_get(&conf->pending_bio_list);
825673ca68dSNeilBrown conf->pending_count = 0;
826673ca68dSNeilBrown spin_unlock_irq(&conf->device_lock);
827474beb57SNeilBrown
828474beb57SNeilBrown /*
829474beb57SNeilBrown * As this is called in a wait_event() loop (see freeze_array),
830474beb57SNeilBrown * current->state might be TASK_UNINTERRUPTIBLE which will
831474beb57SNeilBrown * cause a warning when we prepare to wait again. As it is
832474beb57SNeilBrown * rare that this path is taken, it is perfectly safe to force
833474beb57SNeilBrown * us to go around the wait_event() loop again, so the warning
834474beb57SNeilBrown * is a false-positive. Silence the warning by resetting
835474beb57SNeilBrown * thread state
836474beb57SNeilBrown */
837474beb57SNeilBrown __set_current_state(TASK_RUNNING);
83818022a1bSShaohua Li blk_start_plug(&plug);
839673ca68dSNeilBrown flush_bio_list(conf, bio);
84018022a1bSShaohua Li blk_finish_plug(&plug);
841a35e63efSNeilBrown } else
842a35e63efSNeilBrown spin_unlock_irq(&conf->device_lock);
8437eaceaccSJens Axboe }
8447eaceaccSJens Axboe
84517999be4SNeilBrown /* Barriers....
84617999be4SNeilBrown * Sometimes we need to suspend IO while we do something else,
84717999be4SNeilBrown * either some resync/recovery, or reconfigure the array.
84817999be4SNeilBrown * To do this we raise a 'barrier'.
84917999be4SNeilBrown * The 'barrier' is a counter that can be raised multiple times
85017999be4SNeilBrown * to count how many activities are happening which preclude
85117999be4SNeilBrown * normal IO.
85217999be4SNeilBrown * We can only raise the barrier if there is no pending IO.
85317999be4SNeilBrown * i.e. if nr_pending == 0.
85417999be4SNeilBrown * We choose only to raise the barrier if no-one is waiting for the
85517999be4SNeilBrown * barrier to go down. This means that as soon as an IO request
85617999be4SNeilBrown * is ready, no other operations which require a barrier will start
85717999be4SNeilBrown * until the IO request has had a chance.
85817999be4SNeilBrown *
85917999be4SNeilBrown * So: regular IO calls 'wait_barrier'. When that returns there
86017999be4SNeilBrown * is no background IO happening; it must arrange to call
86117999be4SNeilBrown * allow_barrier when it has finished its IO.
86217999be4SNeilBrown * Background IO calls must call raise_barrier. Once that returns
86317999be4SNeilBrown * there is no normal IO happening. It must arrange to call
86417999be4SNeilBrown * lower_barrier when the particular background IO completes.
8654675719dSHou Tao *
8664675719dSHou Tao * If resync/recovery is interrupted, returns -EINTR;
8674675719dSHou Tao * otherwise, returns 0.
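 *
 * A minimal usage sketch (illustrative only; the resync path is the
 * real caller, and lower_barrier() normally runs from put_buf() on
 * completion):
 *
 *	if (raise_barrier(conf, sector_nr) < 0)
 *		return 0;		(interrupted by MD_RECOVERY_INTR)
 *	... issue resync I/O within this barrier bucket ...
 *	lower_barrier(conf, sector_nr);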
8681da177e4SLinus Torvalds */
8694675719dSHou Tao static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
8701da177e4SLinus Torvalds {
871fd76863eScolyli@suse.de int idx = sector_to_idx(sector_nr);
872fd76863eScolyli@suse.de
8731da177e4SLinus Torvalds spin_lock_irq(&conf->resync_lock);
8741da177e4SLinus Torvalds
87517999be4SNeilBrown /* Wait until no block IO is waiting */
876824e47daScolyli@suse.de wait_event_lock_irq(conf->wait_barrier,
877824e47daScolyli@suse.de !atomic_read(&conf->nr_waiting[idx]),
878eed8c02eSLukas Czerner conf->resync_lock);
87917999be4SNeilBrown
88017999be4SNeilBrown /* block any new IO from starting */
881824e47daScolyli@suse.de atomic_inc(&conf->barrier[idx]);
882824e47daScolyli@suse.de /*
883824e47daScolyli@suse.de * In raise_barrier() we first increase conf->barrier[idx], then
884824e47daScolyli@suse.de * check conf->nr_pending[idx]. In _wait_barrier() we first
885824e47daScolyli@suse.de * increase conf->nr_pending[idx], then check conf->barrier[idx].
886824e47daScolyli@suse.de * A memory barrier is needed here to make sure conf->nr_pending[idx]
887824e47daScolyli@suse.de * won't be fetched before conf->barrier[idx] is increased. Otherwise
888824e47daScolyli@suse.de * there will be a race between raise_barrier() and _wait_barrier().
889824e47daScolyli@suse.de */
890824e47daScolyli@suse.de smp_mb__after_atomic();
89117999be4SNeilBrown
89279ef3a8aSmajianpeng /* For these conditions we must wait:
89379ef3a8aSmajianpeng * A: while the array is in frozen state
894fd76863eScolyli@suse.de * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
895fd76863eScolyli@suse.de * exists in the corresponding I/O barrier bucket.
896fd76863eScolyli@suse.de * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the max
897fd76863eScolyli@suse.de * resync count allowed on the current I/O barrier bucket is reached.
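 *
 * RESYNC_DEPTH (32, defined above) is what bounds condition C, so
 * at most 32 resync requests can be in flight per barrier bucket.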
89879ef3a8aSmajianpeng */
89917999be4SNeilBrown wait_event_lock_irq(conf->wait_barrier,
9008c242593SYufen Yu (!conf->array_frozen &&
901824e47daScolyli@suse.de !atomic_read(&conf->nr_pending[idx]) &&
9028c242593SYufen Yu atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
9038c242593SYufen Yu test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
904eed8c02eSLukas Czerner conf->resync_lock);
90517999be4SNeilBrown
9068c242593SYufen Yu if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
9078c242593SYufen Yu atomic_dec(&conf->barrier[idx]);
9088c242593SYufen Yu spin_unlock_irq(&conf->resync_lock);
9098c242593SYufen Yu wake_up(&conf->wait_barrier);
9108c242593SYufen Yu return -EINTR;
9118c242593SYufen Yu }
9128c242593SYufen Yu
91343ac9b84SXiao Ni atomic_inc(&conf->nr_sync_pending);
9141da177e4SLinus Torvalds spin_unlock_irq(&conf->resync_lock);
9158c242593SYufen Yu
9168c242593SYufen Yu return 0;
9171da177e4SLinus Torvalds }
9181da177e4SLinus Torvalds
919fd76863eScolyli@suse.de static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
92017999be4SNeilBrown {
921fd76863eScolyli@suse.de int idx = sector_to_idx(sector_nr);
922fd76863eScolyli@suse.de
923824e47daScolyli@suse.de BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
924fd76863eScolyli@suse.de
925824e47daScolyli@suse.de atomic_dec(&conf->barrier[idx]);
92643ac9b84SXiao Ni atomic_dec(&conf->nr_sync_pending);
92717999be4SNeilBrown wake_up(&conf->wait_barrier);
92817999be4SNeilBrown }
92917999be4SNeilBrown
930fd76863eScolyli@suse.de static void _wait_barrier(struct r1conf *conf, int idx)
93117999be4SNeilBrown {
932824e47daScolyli@suse.de /*
933824e47daScolyli@suse.de * We need to increase conf->nr_pending[idx] very early here,
934824e47daScolyli@suse.de * then raise_barrier() can be blocked when it waits for
935824e47daScolyli@suse.de * conf->nr_pending[idx] to be 0. Then we can avoid holding
936824e47daScolyli@suse.de * conf->resync_lock when there is no barrier raised in same
937824e47daScolyli@suse.de * barrier unit bucket. Also if the array is frozen, I/O
938824e47daScolyli@suse.de * should be blocked until array is unfrozen.
939824e47daScolyli@suse.de */
940824e47daScolyli@suse.de atomic_inc(&conf->nr_pending[idx]);
941824e47daScolyli@suse.de /*
942824e47daScolyli@suse.de * In _wait_barrier() we first increase conf->nr_pending[idx], then
943824e47daScolyli@suse.de * check conf->barrier[idx]. In raise_barrier() we first increase
944824e47daScolyli@suse.de * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
945824e47daScolyli@suse.de * barrier is necessary here to make sure conf->barrier[idx] won't be
946824e47daScolyli@suse.de * fetched before conf->nr_pending[idx] is increased. Otherwise there
947824e47daScolyli@suse.de * will be a race between _wait_barrier() and raise_barrier().
948824e47daScolyli@suse.de */
949824e47daScolyli@suse.de smp_mb__after_atomic();
95079ef3a8aSmajianpeng
951824e47daScolyli@suse.de /*
952824e47daScolyli@suse.de * Don't worry about checking two atomic_t variables at the same time
953824e47daScolyli@suse.de * here. If, while we check conf->barrier[idx], the array is
954824e47daScolyli@suse.de * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
955824e47daScolyli@suse.de * 0, it is safe to return and let the I/O continue. Because the
956824e47daScolyli@suse.de * array is frozen, all I/O returned here will eventually complete
957824e47daScolyli@suse.de * or be queued; no race will happen. See the code comment in
958824e47daScolyli@suse.de * freeze_array().
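 *
 * The memory-barrier pairing, in sketch form:
 *
 *	raise_barrier()                    _wait_barrier()
 *	  atomic_inc(&barrier[idx]);         atomic_inc(&nr_pending[idx]);
 *	  smp_mb__after_atomic();            smp_mb__after_atomic();
 *	  read nr_pending[idx];              read barrier[idx];
 *
 * so at least one side is guaranteed to observe the other's increment.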
959824e47daScolyli@suse.de */
960824e47daScolyli@suse.de if (!READ_ONCE(conf->array_frozen) &&
961824e47daScolyli@suse.de !atomic_read(&conf->barrier[idx]))
962824e47daScolyli@suse.de return;
963824e47daScolyli@suse.de
964824e47daScolyli@suse.de /*
965824e47daScolyli@suse.de * After holding conf->resync_lock, conf->nr_pending[idx]
966824e47daScolyli@suse.de * should be decreased before waiting for the barrier to drop.
967824e47daScolyli@suse.de * Otherwise, we may encounter a race condition because
968824e47daScolyli@suse.de * raise_barrier() might be waiting for conf->nr_pending[idx]
969824e47daScolyli@suse.de * to be 0 at the same time.
970824e47daScolyli@suse.de */
971824e47daScolyli@suse.de spin_lock_irq(&conf->resync_lock);
972824e47daScolyli@suse.de atomic_inc(&conf->nr_waiting[idx]);
973824e47daScolyli@suse.de atomic_dec(&conf->nr_pending[idx]);
974824e47daScolyli@suse.de /*
975824e47daScolyli@suse.de * In case freeze_array() is waiting for
976824e47daScolyli@suse.de * get_unqueued_pending() == extra
977824e47daScolyli@suse.de */
978824e47daScolyli@suse.de wake_up(&conf->wait_barrier);
979824e47daScolyli@suse.de /* Wait for the barrier in the same barrier unit bucket to drop. */
980824e47daScolyli@suse.de wait_event_lock_irq(conf->wait_barrier,
981824e47daScolyli@suse.de !conf->array_frozen &&
982824e47daScolyli@suse.de !atomic_read(&conf->barrier[idx]),
983824e47daScolyli@suse.de conf->resync_lock);
984824e47daScolyli@suse.de atomic_inc(&conf->nr_pending[idx]);
985824e47daScolyli@suse.de atomic_dec(&conf->nr_waiting[idx]);
986fd76863eScolyli@suse.de spin_unlock_irq(&conf->resync_lock);
98779ef3a8aSmajianpeng }
98879ef3a8aSmajianpeng
989fd76863eScolyli@suse.de static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
99079ef3a8aSmajianpeng {
991fd76863eScolyli@suse.de int idx = sector_to_idx(sector_nr);
99279ef3a8aSmajianpeng
993824e47daScolyli@suse.de /*
994824e47daScolyli@suse.de * Very similar to _wait_barrier(). The difference is, for read
995824e47daScolyli@suse.de * I/O we don't need to wait for sync I/O, but if the whole array
996824e47daScolyli@suse.de * is frozen, the read I/O still has to wait until the array is
997824e47daScolyli@suse.de * unfrozen. Since there is no ordering requirement with
998824e47daScolyli@suse.de * conf->barrier[idx] here, a memory barrier is unnecessary as well.
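 *
 * So the common case for a read is just the atomic_inc() plus one
 * READ_ONCE() check below; conf->resync_lock is only taken while
 * the array is frozen.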
999824e47daScolyli@suse.de */
1000824e47daScolyli@suse.de atomic_inc(&conf->nr_pending[idx]);
1001824e47daScolyli@suse.de
1002824e47daScolyli@suse.de if (!READ_ONCE(conf->array_frozen))
1003824e47daScolyli@suse.de return;
100417999be4SNeilBrown
100517999be4SNeilBrown spin_lock_irq(&conf->resync_lock);
1006824e47daScolyli@suse.de atomic_inc(&conf->nr_waiting[idx]);
1007824e47daScolyli@suse.de atomic_dec(&conf->nr_pending[idx]);
1008824e47daScolyli@suse.de /*
1009824e47daScolyli@suse.de * In case freeze_array() is waiting for
1010824e47daScolyli@suse.de * get_unqueued_pending() == extra
1011d6b42dcbSNeilBrown */
101217999be4SNeilBrown wake_up(&conf->wait_barrier);
1013824e47daScolyli@suse.de /* Wait for array to be unfrozen */
1014824e47daScolyli@suse.de wait_event_lock_irq(conf->wait_barrier,
1015fd76863eScolyli@suse.de !conf->array_frozen,
10161da177e4SLinus Torvalds conf->resync_lock);
1017824e47daScolyli@suse.de atomic_inc(&conf->nr_pending[idx]);
1018824e47daScolyli@suse.de atomic_dec(&conf->nr_waiting[idx]);
101917999be4SNeilBrown spin_unlock_irq(&conf->resync_lock);
102017999be4SNeilBrown }
102117999be4SNeilBrown
1022fd76863eScolyli@suse.de static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1023fd76863eScolyli@suse.de {
1024fd76863eScolyli@suse.de int idx = sector_to_idx(sector_nr);
1025fd76863eScolyli@suse.de
1026fd76863eScolyli@suse.de _wait_barrier(conf, idx);
1027fd76863eScolyli@suse.de }
1028fd76863eScolyli@suse.de
1029fd76863eScolyli@suse.de static void _allow_barrier(struct r1conf *conf, int idx)
103017999be4SNeilBrown {
1031824e47daScolyli@suse.de atomic_dec(&conf->nr_pending[idx]);
103217999be4SNeilBrown wake_up(&conf->wait_barrier);
103317999be4SNeilBrown }
103417999be4SNeilBrown
1035fd76863eScolyli@suse.de static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1036fd76863eScolyli@suse.de {
1037fd76863eScolyli@suse.de int idx = sector_to_idx(sector_nr);
1038fd76863eScolyli@suse.de
1039fd76863eScolyli@suse.de _allow_barrier(conf, idx);
1040fd76863eScolyli@suse.de }
1041fd76863eScolyli@suse.de
1042fd76863eScolyli@suse.de /* conf->resync_lock should be held */
1043fd76863eScolyli@suse.de static int get_unqueued_pending(struct r1conf *conf)
1044fd76863eScolyli@suse.de {
1045fd76863eScolyli@suse.de int idx, ret;
1046fd76863eScolyli@suse.de
104743ac9b84SXiao Ni ret = atomic_read(&conf->nr_sync_pending);
104843ac9b84SXiao Ni for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1049824e47daScolyli@suse.de ret += atomic_read(&conf->nr_pending[idx]) -
1050824e47daScolyli@suse.de atomic_read(&conf->nr_queued[idx]);
1051fd76863eScolyli@suse.de
1052fd76863eScolyli@suse.de return ret;
105317999be4SNeilBrown }
105417999be4SNeilBrown
1055e2d59925SNeilBrown static void freeze_array(struct r1conf *conf, int extra)
1056ddaf22abSNeilBrown {
1057fd76863eScolyli@suse.de /* Stop sync I/O and normal I/O and wait for everything to
105811353b9dSZhilong Liu * go quiet.
1059fd76863eScolyli@suse.de * This is called in two situations:
1060fd76863eScolyli@suse.de * 1) management command handlers (reshape, remove disk, quiesce).
1061fd76863eScolyli@suse.de * 2) one normal I/O request failed.
1062fd76863eScolyli@suse.de
1063fd76863eScolyli@suse.de * After array_frozen is set to 1, new sync IO will be blocked at
1064fd76863eScolyli@suse.de * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
1065fd76863eScolyli@suse.de * or wait_read_barrier(). The in-flight I/Os will either complete or be
1066fd76863eScolyli@suse.de * queued.
When everything goes quiet, there are only queued I/Os left.
1067fd76863eScolyli@suse.de
1068fd76863eScolyli@suse.de * Every in-flight I/O contributes to a conf->nr_pending[idx], where idx
1069fd76863eScolyli@suse.de * is the barrier bucket index which this I/O request hits. When all sync
1070fd76863eScolyli@suse.de * and normal I/O are queued, the sum of all conf->nr_pending[] will match
1071fd76863eScolyli@suse.de * the sum of all conf->nr_queued[]. But normal I/O failure is an exception:
1072fd76863eScolyli@suse.de * in handle_read_error(), we may call freeze_array() before trying to
1073fd76863eScolyli@suse.de * fix the read error. In this case, the failed read I/O is not queued,
1074fd76863eScolyli@suse.de * so get_unqueued_pending() == 1.
1075fd76863eScolyli@suse.de *
1076fd76863eScolyli@suse.de * Therefore before this function returns, we need to wait until
1077fd76863eScolyli@suse.de * get_unqueued_pending(conf) becomes equal to extra. For the
1078fd76863eScolyli@suse.de * normal I/O context, extra is 1; in all other situations extra is 0.
1079ddaf22abSNeilBrown */
1080ddaf22abSNeilBrown spin_lock_irq(&conf->resync_lock);
1081b364e3d0Smajianpeng conf->array_frozen = 1;
1082578b54adSNeilBrown raid1_log(conf->mddev, "wait freeze");
1083fd76863eScolyli@suse.de wait_event_lock_irq_cmd(
1084fd76863eScolyli@suse.de conf->wait_barrier,
1085fd76863eScolyli@suse.de get_unqueued_pending(conf) == extra,
1086ddaf22abSNeilBrown conf->resync_lock,
1087c3b328acSNeilBrown flush_pending_writes(conf));
1088ddaf22abSNeilBrown spin_unlock_irq(&conf->resync_lock);
1089ddaf22abSNeilBrown }
1090e8096360SNeilBrown static void unfreeze_array(struct r1conf *conf)
1091ddaf22abSNeilBrown {
1092ddaf22abSNeilBrown /* reverse the effect of the freeze */
1093ddaf22abSNeilBrown spin_lock_irq(&conf->resync_lock);
1094b364e3d0Smajianpeng conf->array_frozen = 0;
1095ddaf22abSNeilBrown spin_unlock_irq(&conf->resync_lock);
1096824e47daScolyli@suse.de wake_up(&conf->wait_barrier);
1097ddaf22abSNeilBrown }
1098ddaf22abSNeilBrown
109916d56e2fSShaohua Li static void alloc_behind_master_bio(struct r1bio *r1_bio,
1100cb83efcfSNeilBrown struct bio *bio)
11014b6d287fSNeilBrown {
1102cb83efcfSNeilBrown int size = bio->bi_iter.bi_size;
1103841c1316SMing Lei unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1104841c1316SMing Lei int i = 0;
1105841c1316SMing Lei struct bio *behind_bio = NULL;
11064b6d287fSNeilBrown
1107*a78f18daSChristoph Hellwig behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
1108841c1316SMing Lei if (!behind_bio)
110916d56e2fSShaohua Li return;
1110841c1316SMing Lei
111141743c1fSShaohua Li /* discard op, we don't support writezero/writesame yet */
111216d56e2fSShaohua Li if (!bio_has_data(bio)) {
111316d56e2fSShaohua Li behind_bio->bi_iter.bi_size = size;
111441743c1fSShaohua Li goto skip_copy;
111516d56e2fSShaohua Li }
111641743c1fSShaohua Li
1117dba40d46SMariusz Dabrowski behind_bio->bi_write_hint = bio->bi_write_hint;
1118dba40d46SMariusz Dabrowski
1119841c1316SMing Lei while (i < vcnt && size) {
1120841c1316SMing Lei struct page *page;
1121841c1316SMing Lei int len = min_t(int, PAGE_SIZE, size);
1122841c1316SMing Lei
1123841c1316SMing Lei page = alloc_page(GFP_NOIO);
1124841c1316SMing Lei if (unlikely(!page))
1125841c1316SMing Lei goto free_pages;
1126841c1316SMing Lei
1127841c1316SMing Lei bio_add_page(behind_bio, page, len, 0);
1128841c1316SMing Lei
1129841c1316SMing Lei size -= len;
1130841c1316SMing Lei i++;
11314b6d287fSNeilBrown }
11324b6d287fSNeilBrown
1133cb83efcfSNeilBrown bio_copy_data(behind_bio,
bio); 113441743c1fSShaohua Li skip_copy: 113556a64c17SLuis de Bethencourt r1_bio->behind_master_bio = behind_bio; 1136841c1316SMing Lei set_bit(R1BIO_BehindIO, &r1_bio->state); 1137841c1316SMing Lei 113816d56e2fSShaohua Li return; 1139841c1316SMing Lei 1140841c1316SMing Lei free_pages: 11414f024f37SKent Overstreet pr_debug("%dB behind alloc failed, doing sync I/O\n", 11424f024f37SKent Overstreet bio->bi_iter.bi_size); 1143841c1316SMing Lei bio_free_pages(behind_bio); 114416d56e2fSShaohua Li bio_put(behind_bio); 11454b6d287fSNeilBrown } 11464b6d287fSNeilBrown 1147f54a9d0eSNeilBrown struct raid1_plug_cb { 1148f54a9d0eSNeilBrown struct blk_plug_cb cb; 1149f54a9d0eSNeilBrown struct bio_list pending; 1150f54a9d0eSNeilBrown int pending_cnt; 1151f54a9d0eSNeilBrown }; 1152f54a9d0eSNeilBrown 1153f54a9d0eSNeilBrown static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) 1154f54a9d0eSNeilBrown { 1155f54a9d0eSNeilBrown struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, 1156f54a9d0eSNeilBrown cb); 1157f54a9d0eSNeilBrown struct mddev *mddev = plug->cb.data; 1158f54a9d0eSNeilBrown struct r1conf *conf = mddev->private; 1159f54a9d0eSNeilBrown struct bio *bio; 1160f54a9d0eSNeilBrown 1161874807a8SNeilBrown if (from_schedule || current->bio_list) { 1162f54a9d0eSNeilBrown spin_lock_irq(&conf->device_lock); 1163f54a9d0eSNeilBrown bio_list_merge(&conf->pending_bio_list, &plug->pending); 1164f54a9d0eSNeilBrown conf->pending_count += plug->pending_cnt; 1165f54a9d0eSNeilBrown spin_unlock_irq(&conf->device_lock); 1166ee0b0244SNeilBrown wake_up(&conf->wait_barrier); 1167f54a9d0eSNeilBrown md_wakeup_thread(mddev->thread); 1168f54a9d0eSNeilBrown kfree(plug); 1169f54a9d0eSNeilBrown return; 1170f54a9d0eSNeilBrown } 1171f54a9d0eSNeilBrown 1172f54a9d0eSNeilBrown /* we aren't scheduling, so we can do the write-out directly. 
*/ 1173f54a9d0eSNeilBrown bio = bio_list_get(&plug->pending); 1174673ca68dSNeilBrown flush_bio_list(conf, bio); 1175f54a9d0eSNeilBrown kfree(plug); 1176f54a9d0eSNeilBrown } 1177f54a9d0eSNeilBrown 1178689389a0SNeilBrown static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) 1179689389a0SNeilBrown { 1180689389a0SNeilBrown r1_bio->master_bio = bio; 1181689389a0SNeilBrown r1_bio->sectors = bio_sectors(bio); 1182689389a0SNeilBrown r1_bio->state = 0; 1183689389a0SNeilBrown r1_bio->mddev = mddev; 1184689389a0SNeilBrown r1_bio->sector = bio->bi_iter.bi_sector; 1185689389a0SNeilBrown } 1186689389a0SNeilBrown 1187fd76863eScolyli@suse.de static inline struct r1bio * 1188689389a0SNeilBrown alloc_r1bio(struct mddev *mddev, struct bio *bio) 1189fd76863eScolyli@suse.de { 1190fd76863eScolyli@suse.de struct r1conf *conf = mddev->private; 1191fd76863eScolyli@suse.de struct r1bio *r1_bio; 1192fd76863eScolyli@suse.de 1193afeee514SKent Overstreet r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); 1194689389a0SNeilBrown /* Ensure no bio records IO_BLOCKED */ 1195689389a0SNeilBrown memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); 1196689389a0SNeilBrown init_r1bio(r1_bio, mddev, bio); 1197fd76863eScolyli@suse.de return r1_bio; 1198fd76863eScolyli@suse.de } 1199fd76863eScolyli@suse.de 1200c230e7e5SNeilBrown static void raid1_read_request(struct mddev *mddev, struct bio *bio, 1201689389a0SNeilBrown int max_read_sectors, struct r1bio *r1_bio) 12021da177e4SLinus Torvalds { 1203e8096360SNeilBrown struct r1conf *conf = mddev->private; 12040eaf822cSJonathan Brassow struct raid1_info *mirror; 12051da177e4SLinus Torvalds struct bio *read_bio; 12063b046a97SRobert LeBlanc struct bitmap *bitmap = mddev->bitmap; 1207796a5cf0SMike Christie const int op = bio_op(bio); 12081eff9d32SJens Axboe const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 12091f68f0c4SNeilBrown int max_sectors; 1210d2eb35acSNeilBrown int rdisk; 1211689389a0SNeilBrown bool print_msg = !!r1_bio; 1212689389a0SNeilBrown char b[BDEVNAME_SIZE]; 1213689389a0SNeilBrown 1214689389a0SNeilBrown /* 1215689389a0SNeilBrown * If r1_bio is set, we are blocking the raid1d thread 1216689389a0SNeilBrown * so there is a tiny risk of deadlock. So ask for 1217689389a0SNeilBrown * emergency memory if needed. 1218689389a0SNeilBrown */ 1219689389a0SNeilBrown gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; 1220689389a0SNeilBrown 1221689389a0SNeilBrown if (print_msg) { 1222689389a0SNeilBrown /* Need to get the block device name carefully */ 1223689389a0SNeilBrown struct md_rdev *rdev; 1224689389a0SNeilBrown rcu_read_lock(); 1225689389a0SNeilBrown rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); 1226689389a0SNeilBrown if (rdev) 1227689389a0SNeilBrown bdevname(rdev->bdev, b); 1228689389a0SNeilBrown else 1229689389a0SNeilBrown strcpy(b, "???"); 1230689389a0SNeilBrown rcu_read_unlock(); 1231689389a0SNeilBrown } 1232d2eb35acSNeilBrown 1233fd76863eScolyli@suse.de /* 1234fd76863eScolyli@suse.de * Still need barrier for READ in case that whole 1235fd76863eScolyli@suse.de * array is frozen. 
1236fd76863eScolyli@suse.de */ 1237fd76863eScolyli@suse.de wait_read_barrier(conf, bio->bi_iter.bi_sector); 12383b046a97SRobert LeBlanc 1239689389a0SNeilBrown if (!r1_bio) 1240689389a0SNeilBrown r1_bio = alloc_r1bio(mddev, bio); 1241689389a0SNeilBrown else 1242689389a0SNeilBrown init_r1bio(r1_bio, mddev, bio); 1243c230e7e5SNeilBrown r1_bio->sectors = max_read_sectors; 1244fd76863eScolyli@suse.de 1245fd76863eScolyli@suse.de /* 1246fd76863eScolyli@suse.de * make_request() can abort the operation when read-ahead is being 1247fd76863eScolyli@suse.de * used and no empty request is available. 1248fd76863eScolyli@suse.de */ 1249d2eb35acSNeilBrown rdisk = read_balance(conf, r1_bio, &max_sectors); 12501da177e4SLinus Torvalds 12511da177e4SLinus Torvalds if (rdisk < 0) { 12521da177e4SLinus Torvalds /* couldn't find anywhere to read from */ 1253689389a0SNeilBrown if (print_msg) { 1254689389a0SNeilBrown pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 1255689389a0SNeilBrown mdname(mddev), 1256689389a0SNeilBrown b, 1257689389a0SNeilBrown (unsigned long long)r1_bio->sector); 1258689389a0SNeilBrown } 12591da177e4SLinus Torvalds raid_end_bio_io(r1_bio); 12605a7bbad2SChristoph Hellwig return; 12611da177e4SLinus Torvalds } 12621da177e4SLinus Torvalds mirror = conf->mirrors + rdisk; 12631da177e4SLinus Torvalds 1264689389a0SNeilBrown if (print_msg) 1265689389a0SNeilBrown pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", 1266689389a0SNeilBrown mdname(mddev), 1267689389a0SNeilBrown (unsigned long long)r1_bio->sector, 1268689389a0SNeilBrown bdevname(mirror->rdev->bdev, b)); 1269689389a0SNeilBrown 1270e555190dSNeilBrown if (test_bit(WriteMostly, &mirror->rdev->flags) && 1271e555190dSNeilBrown bitmap) { 12723b046a97SRobert LeBlanc /* 12733b046a97SRobert LeBlanc * Reading from a write-mostly device must take care not to 12743b046a97SRobert LeBlanc * over-take any writes that are 'behind' 1275e555190dSNeilBrown */ 1276578b54adSNeilBrown raid1_log(mddev, "wait behind writes"); 1277e555190dSNeilBrown wait_event(bitmap->behind_wait, 1278e555190dSNeilBrown atomic_read(&bitmap->behind_writes) == 0); 1279e555190dSNeilBrown } 1280c230e7e5SNeilBrown 1281c230e7e5SNeilBrown if (max_sectors < bio_sectors(bio)) { 1282c230e7e5SNeilBrown struct bio *split = bio_split(bio, max_sectors, 1283afeee514SKent Overstreet gfp, &conf->bio_split); 1284c230e7e5SNeilBrown bio_chain(split, bio); 1285ed00aabdSChristoph Hellwig submit_bio_noacct(bio); 1286c230e7e5SNeilBrown bio = split; 1287c230e7e5SNeilBrown r1_bio->master_bio = bio; 1288c230e7e5SNeilBrown r1_bio->sectors = max_sectors; 1289c230e7e5SNeilBrown } 1290c230e7e5SNeilBrown 12911da177e4SLinus Torvalds r1_bio->read_disk = rdisk; 12921da177e4SLinus Torvalds 1293afeee514SKent Overstreet read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); 12941da177e4SLinus Torvalds 12951da177e4SLinus Torvalds r1_bio->bios[rdisk] = read_bio; 12961da177e4SLinus Torvalds 12974f024f37SKent Overstreet read_bio->bi_iter.bi_sector = r1_bio->sector + 12984f024f37SKent Overstreet mirror->rdev->data_offset; 129974d46992SChristoph Hellwig bio_set_dev(read_bio, mirror->rdev->bdev); 13001da177e4SLinus Torvalds read_bio->bi_end_io = raid1_end_read_request; 1301796a5cf0SMike Christie bio_set_op_attrs(read_bio, op, do_sync); 13022e52d449SNeilBrown if (test_bit(FailFast, &mirror->rdev->flags) && 13032e52d449SNeilBrown test_bit(R1BIO_FailFast, &r1_bio->state)) 13042e52d449SNeilBrown read_bio->bi_opf |= MD_FAILFAST; 13051da177e4SLinus Torvalds 
read_bio->bi_private = r1_bio;
13061da177e4SLinus Torvalds
1307109e3765SNeilBrown 	if (mddev->gendisk)
13081c02fca6SChristoph Hellwig 	        trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
13091c02fca6SChristoph Hellwig 				      r1_bio->sector);
1310109e3765SNeilBrown
1311ed00aabdSChristoph Hellwig 	submit_bio_noacct(read_bio);
13121da177e4SLinus Torvalds }
13131da177e4SLinus Torvalds
1314c230e7e5SNeilBrown static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1315c230e7e5SNeilBrown 				int max_write_sectors)
13163b046a97SRobert LeBlanc {
13173b046a97SRobert LeBlanc 	struct r1conf *conf = mddev->private;
1318fd76863eScolyli@suse.de 	struct r1bio *r1_bio;
13193b046a97SRobert LeBlanc 	int i, disks;
13203b046a97SRobert LeBlanc 	struct bitmap *bitmap = mddev->bitmap;
13213b046a97SRobert LeBlanc 	unsigned long flags;
13223b046a97SRobert LeBlanc 	struct md_rdev *blocked_rdev;
13233b046a97SRobert LeBlanc 	struct blk_plug_cb *cb;
13243b046a97SRobert LeBlanc 	struct raid1_plug_cb *plug = NULL;
13253b046a97SRobert LeBlanc 	int first_clone;
13263b046a97SRobert LeBlanc 	int max_sectors;
13273b046a97SRobert LeBlanc
1328b3143b9aSNeilBrown 	if (mddev_is_clustered(mddev) &&
13293b046a97SRobert LeBlanc 	     md_cluster_ops->area_resyncing(mddev, WRITE,
1330b3143b9aSNeilBrown 		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {
13313b046a97SRobert LeBlanc
13323b046a97SRobert LeBlanc 		DEFINE_WAIT(w);
13333b046a97SRobert LeBlanc 		for (;;) {
13343b046a97SRobert LeBlanc 			prepare_to_wait(&conf->wait_barrier,
1335ae89fd3dSMikulas Patocka 					&w, TASK_IDLE);
1336f81f7302SGuoqing Jiang 			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
13373b046a97SRobert LeBlanc 							bio->bi_iter.bi_sector,
1338b3143b9aSNeilBrown 							bio_end_sector(bio)))
13393b046a97SRobert LeBlanc 				break;
13403b046a97SRobert LeBlanc 			schedule();
13413b046a97SRobert LeBlanc 		}
13423b046a97SRobert LeBlanc 		finish_wait(&conf->wait_barrier, &w);
13433b046a97SRobert LeBlanc 	}
1344f81f7302SGuoqing Jiang
1345f81f7302SGuoqing Jiang 	/*
1346f81f7302SGuoqing Jiang 	 * Register the new request and wait if the reconstruction
1347f81f7302SGuoqing Jiang 	 * thread has put up a barrier for new requests.
1348f81f7302SGuoqing Jiang 	 * Continue immediately if no resync is active currently.
1349f81f7302SGuoqing Jiang 	 */
1350fd76863eScolyli@suse.de 	wait_barrier(conf, bio->bi_iter.bi_sector);
1351fd76863eScolyli@suse.de
1352689389a0SNeilBrown 	r1_bio = alloc_r1bio(mddev, bio);
1353c230e7e5SNeilBrown 	r1_bio->sectors = max_write_sectors;
13543b046a97SRobert LeBlanc
135534db0cd6SNeilBrown 	if (conf->pending_count >= max_queued_requests) {
135634db0cd6SNeilBrown 		md_wakeup_thread(mddev->thread);
1357578b54adSNeilBrown 		raid1_log(mddev, "wait queued");
135834db0cd6SNeilBrown 		wait_event(conf->wait_barrier,
135934db0cd6SNeilBrown 			   conf->pending_count < max_queued_requests);
136034db0cd6SNeilBrown 	}
13611f68f0c4SNeilBrown 	/* first select target devices under rcu_lock and
13621da177e4SLinus Torvalds 	 * inc refcount on their rdev. Record them by setting
13631da177e4SLinus Torvalds 	 * bios[x] to bio
13641f68f0c4SNeilBrown 	 * If there are known/acknowledged bad blocks on any device on
13651f68f0c4SNeilBrown 	 * which we have seen a write error, we want to avoid writing those
13661f68f0c4SNeilBrown 	 * blocks.
13671f68f0c4SNeilBrown 	 * This potentially requires several writes to write around
13681f68f0c4SNeilBrown 	 * the bad blocks. Each set of writes gets its own r1bio
13691f68f0c4SNeilBrown 	 * with a set of bios attached.
13701da177e4SLinus Torvalds */ 1371c3b328acSNeilBrown 13728f19ccb2SNeilBrown disks = conf->raid_disks * 2; 13736bfe0b49SDan Williams retry_write: 13746bfe0b49SDan Williams blocked_rdev = NULL; 13751da177e4SLinus Torvalds rcu_read_lock(); 13761f68f0c4SNeilBrown max_sectors = r1_bio->sectors; 13771da177e4SLinus Torvalds for (i = 0; i < disks; i++) { 13783cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 13796bfe0b49SDan Williams if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 13806bfe0b49SDan Williams atomic_inc(&rdev->nr_pending); 13816bfe0b49SDan Williams blocked_rdev = rdev; 13826bfe0b49SDan Williams break; 13836bfe0b49SDan Williams } 13841da177e4SLinus Torvalds r1_bio->bios[i] = NULL; 13858ae12666SKent Overstreet if (!rdev || test_bit(Faulty, &rdev->flags)) { 13868f19ccb2SNeilBrown if (i < conf->raid_disks) 13871f68f0c4SNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state); 13881f68f0c4SNeilBrown continue; 1389964147d5SNeilBrown } 13901f68f0c4SNeilBrown 13911f68f0c4SNeilBrown atomic_inc(&rdev->nr_pending); 13921f68f0c4SNeilBrown if (test_bit(WriteErrorSeen, &rdev->flags)) { 13931f68f0c4SNeilBrown sector_t first_bad; 13941f68f0c4SNeilBrown int bad_sectors; 13951f68f0c4SNeilBrown int is_bad; 13961f68f0c4SNeilBrown 13973b046a97SRobert LeBlanc is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, 13981f68f0c4SNeilBrown &first_bad, &bad_sectors); 13991f68f0c4SNeilBrown if (is_bad < 0) { 14001f68f0c4SNeilBrown /* mustn't write here until the bad block is 14011f68f0c4SNeilBrown * acknowledged*/ 14021f68f0c4SNeilBrown set_bit(BlockedBadBlocks, &rdev->flags); 14031f68f0c4SNeilBrown blocked_rdev = rdev; 14041f68f0c4SNeilBrown break; 14051f68f0c4SNeilBrown } 14061f68f0c4SNeilBrown if (is_bad && first_bad <= r1_bio->sector) { 14071f68f0c4SNeilBrown /* Cannot write here at all */ 14081f68f0c4SNeilBrown bad_sectors -= (r1_bio->sector - first_bad); 14091f68f0c4SNeilBrown if (bad_sectors < max_sectors) 14101f68f0c4SNeilBrown /* mustn't write more than bad_sectors 14111f68f0c4SNeilBrown * to other devices yet 14121f68f0c4SNeilBrown */ 14131f68f0c4SNeilBrown max_sectors = bad_sectors; 14141f68f0c4SNeilBrown rdev_dec_pending(rdev, mddev); 14151f68f0c4SNeilBrown /* We don't set R1BIO_Degraded as that 14161f68f0c4SNeilBrown * only applies if the disk is 14171f68f0c4SNeilBrown * missing, so it might be re-added, 14181f68f0c4SNeilBrown * and we want to know to recover this 14191f68f0c4SNeilBrown * chunk. 
14201f68f0c4SNeilBrown * In this case the device is here, 14211f68f0c4SNeilBrown * and the fact that this chunk is not 14221f68f0c4SNeilBrown * in-sync is recorded in the bad 14231f68f0c4SNeilBrown * block log 14241f68f0c4SNeilBrown */ 14251f68f0c4SNeilBrown continue; 14261f68f0c4SNeilBrown } 14271f68f0c4SNeilBrown if (is_bad) { 14281f68f0c4SNeilBrown int good_sectors = first_bad - r1_bio->sector; 14291f68f0c4SNeilBrown if (good_sectors < max_sectors) 14301f68f0c4SNeilBrown max_sectors = good_sectors; 14311f68f0c4SNeilBrown } 14321f68f0c4SNeilBrown } 14331f68f0c4SNeilBrown r1_bio->bios[i] = bio; 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds rcu_read_unlock(); 14361da177e4SLinus Torvalds 14376bfe0b49SDan Williams if (unlikely(blocked_rdev)) { 14386bfe0b49SDan Williams /* Wait for this device to become unblocked */ 14396bfe0b49SDan Williams int j; 14406bfe0b49SDan Williams 14416bfe0b49SDan Williams for (j = 0; j < i; j++) 14426bfe0b49SDan Williams if (r1_bio->bios[j]) 14436bfe0b49SDan Williams rdev_dec_pending(conf->mirrors[j].rdev, mddev); 14441f68f0c4SNeilBrown r1_bio->state = 0; 1445fd76863eScolyli@suse.de allow_barrier(conf, bio->bi_iter.bi_sector); 1446578b54adSNeilBrown raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); 14476bfe0b49SDan Williams md_wait_for_blocked_rdev(blocked_rdev, mddev); 1448fd76863eScolyli@suse.de wait_barrier(conf, bio->bi_iter.bi_sector); 14496bfe0b49SDan Williams goto retry_write; 14506bfe0b49SDan Williams } 14516bfe0b49SDan Williams 1452c230e7e5SNeilBrown if (max_sectors < bio_sectors(bio)) { 1453c230e7e5SNeilBrown struct bio *split = bio_split(bio, max_sectors, 1454afeee514SKent Overstreet GFP_NOIO, &conf->bio_split); 1455c230e7e5SNeilBrown bio_chain(split, bio); 1456ed00aabdSChristoph Hellwig submit_bio_noacct(bio); 1457c230e7e5SNeilBrown bio = split; 1458c230e7e5SNeilBrown r1_bio->master_bio = bio; 14591f68f0c4SNeilBrown r1_bio->sectors = max_sectors; 1460191ea9b2SNeilBrown } 14614b6d287fSNeilBrown 14624e78064fSNeilBrown atomic_set(&r1_bio->remaining, 1); 14634b6d287fSNeilBrown atomic_set(&r1_bio->behind_remaining, 0); 1464191ea9b2SNeilBrown 14651f68f0c4SNeilBrown first_clone = 1; 1466d8c84c4fSMing Lei 14671da177e4SLinus Torvalds for (i = 0; i < disks; i++) { 14688e58e327SMing Lei struct bio *mbio = NULL; 146969df9cfcSGuoqing Jiang struct md_rdev *rdev = conf->mirrors[i].rdev; 14701da177e4SLinus Torvalds if (!r1_bio->bios[i]) 14711da177e4SLinus Torvalds continue; 14721da177e4SLinus Torvalds 14731f68f0c4SNeilBrown if (first_clone) { 14741f68f0c4SNeilBrown /* do behind I/O ? 
14751f68f0c4SNeilBrown * Not if there are too many, or cannot 14761f68f0c4SNeilBrown * allocate memory, or a reader on WriteMostly 14771f68f0c4SNeilBrown * is waiting for behind writes to flush */ 14781f68f0c4SNeilBrown if (bitmap && 14791f68f0c4SNeilBrown (atomic_read(&bitmap->behind_writes) 14801f68f0c4SNeilBrown < mddev->bitmap_info.max_write_behind) && 14818e58e327SMing Lei !waitqueue_active(&bitmap->behind_wait)) { 148216d56e2fSShaohua Li alloc_behind_master_bio(r1_bio, bio); 14838e58e327SMing Lei } 14841da177e4SLinus Torvalds 1485e64e4018SAndy Shevchenko md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, 1486e64e4018SAndy Shevchenko test_bit(R1BIO_BehindIO, &r1_bio->state)); 14871f68f0c4SNeilBrown first_clone = 0; 14881f68f0c4SNeilBrown } 14898e58e327SMing Lei 1490841c1316SMing Lei if (r1_bio->behind_master_bio) 1491841c1316SMing Lei mbio = bio_clone_fast(r1_bio->behind_master_bio, 1492afeee514SKent Overstreet GFP_NOIO, &mddev->bio_set); 1493c230e7e5SNeilBrown else 1494afeee514SKent Overstreet mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); 14958e58e327SMing Lei 1496841c1316SMing Lei if (r1_bio->behind_master_bio) { 149769df9cfcSGuoqing Jiang if (test_bit(CollisionCheck, &rdev->flags)) 1498d0d2d8baSGuoqing Jiang wait_for_serialization(rdev, r1_bio); 14993e148a32SGuoqing Jiang if (test_bit(WriteMostly, &rdev->flags)) 15004b6d287fSNeilBrown atomic_inc(&r1_bio->behind_remaining); 150169df9cfcSGuoqing Jiang } else if (mddev->serialize_policy) 1502d0d2d8baSGuoqing Jiang wait_for_serialization(rdev, r1_bio); 15034b6d287fSNeilBrown 15041f68f0c4SNeilBrown r1_bio->bios[i] = mbio; 15051f68f0c4SNeilBrown 15064f024f37SKent Overstreet mbio->bi_iter.bi_sector = (r1_bio->sector + 15071f68f0c4SNeilBrown conf->mirrors[i].rdev->data_offset); 150874d46992SChristoph Hellwig bio_set_dev(mbio, conf->mirrors[i].rdev->bdev); 15091f68f0c4SNeilBrown mbio->bi_end_io = raid1_end_write_request; 1510a682e003SLinus Torvalds mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); 1511212e7eb7SNeilBrown if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && 1512212e7eb7SNeilBrown !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && 1513212e7eb7SNeilBrown conf->raid_disks - mddev->degraded > 1) 1514212e7eb7SNeilBrown mbio->bi_opf |= MD_FAILFAST; 15151f68f0c4SNeilBrown mbio->bi_private = r1_bio; 15161f68f0c4SNeilBrown 15171da177e4SLinus Torvalds atomic_inc(&r1_bio->remaining); 1518f54a9d0eSNeilBrown 1519109e3765SNeilBrown if (mddev->gendisk) 15201c02fca6SChristoph Hellwig trace_block_bio_remap(mbio, disk_devt(mddev->gendisk), 1521109e3765SNeilBrown r1_bio->sector); 1522109e3765SNeilBrown /* flush_pending_writes() needs access to the rdev so...*/ 1523309dca30SChristoph Hellwig mbio->bi_bdev = (void *)conf->mirrors[i].rdev; 1524109e3765SNeilBrown 1525f54a9d0eSNeilBrown cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); 1526f54a9d0eSNeilBrown if (cb) 1527f54a9d0eSNeilBrown plug = container_of(cb, struct raid1_plug_cb, cb); 1528f54a9d0eSNeilBrown else 1529f54a9d0eSNeilBrown plug = NULL; 1530f54a9d0eSNeilBrown if (plug) { 1531f54a9d0eSNeilBrown bio_list_add(&plug->pending, mbio); 1532f54a9d0eSNeilBrown plug->pending_cnt++; 1533f54a9d0eSNeilBrown } else { 153423b245c0SShaohua Li spin_lock_irqsave(&conf->device_lock, flags); 15354e78064fSNeilBrown bio_list_add(&conf->pending_bio_list, mbio); 153634db0cd6SNeilBrown conf->pending_count++; 1537191ea9b2SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 1538b357f04aSNeilBrown md_wakeup_thread(mddev->thread); 
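		/*
		 * Editor's sketch (illustrative, not driver code): the two
		 * branches above are what a caller-side blk_plug selects
		 * between. Under a plug the mbios accumulate in
		 * plug->pending and raid1_unplug() flushes them as one
		 * batch; without one they go to pending_bio_list for
		 * raid1d. Assumed calling pattern in a submitter:
		 *
		 *	struct blk_plug plug;
		 *
		 *	blk_start_plug(&plug);
		 *	submit_bio(bio);	 lands in plug->pending
		 *	blk_finish_plug(&plug);	 triggers raid1_unplug()
		 */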
15394e78064fSNeilBrown 		}
154023b245c0SShaohua Li 	}
15411f68f0c4SNeilBrown
1542079fa166SNeilBrown 	r1_bio_write_done(r1_bio);
1543079fa166SNeilBrown
1544079fa166SNeilBrown 	/* In case raid1d snuck in to freeze_array */
1545079fa166SNeilBrown 	wake_up(&conf->wait_barrier);
15461da177e4SLinus Torvalds }
15471da177e4SLinus Torvalds
1548cc27b0c7SNeilBrown static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
15493b046a97SRobert LeBlanc {
1550fd76863eScolyli@suse.de 	sector_t sectors;
15513b046a97SRobert LeBlanc
1552775d7831SDavid Jeffery 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1553775d7831SDavid Jeffery 	    && md_flush_request(mddev, bio))
1554cc27b0c7SNeilBrown 		return true;
15553b046a97SRobert LeBlanc
1556c230e7e5SNeilBrown 	/*
1557c230e7e5SNeilBrown 	 * There is a limit to the maximum size, but
1558c230e7e5SNeilBrown 	 * the read/write handler might find a lower limit
1559c230e7e5SNeilBrown 	 * due to bad blocks. To avoid multiple splits,
1560c230e7e5SNeilBrown 	 * we pass the maximum number of sectors down
1561c230e7e5SNeilBrown 	 * and let the lower level perform the split.
1562c230e7e5SNeilBrown 	 */
1563fd76863eScolyli@suse.de 	sectors = align_to_barrier_unit_end(
1564fd76863eScolyli@suse.de 		bio->bi_iter.bi_sector, bio_sectors(bio));
15653b046a97SRobert LeBlanc
1566c230e7e5SNeilBrown 	if (bio_data_dir(bio) == READ)
1567689389a0SNeilBrown 		raid1_read_request(mddev, bio, sectors, NULL);
1568cc27b0c7SNeilBrown 	else {
1569cc27b0c7SNeilBrown 		if (!md_write_start(mddev, bio))
1570cc27b0c7SNeilBrown 			return false;
1571c230e7e5SNeilBrown 		raid1_write_request(mddev, bio, sectors);
15723b046a97SRobert LeBlanc 	}
1573cc27b0c7SNeilBrown 	return true;
1574cc27b0c7SNeilBrown }
15753b046a97SRobert LeBlanc
1576849674e4SShaohua Li static void raid1_status(struct seq_file *seq, struct mddev *mddev)
15771da177e4SLinus Torvalds {
1578e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
15791da177e4SLinus Torvalds 	int i;
15801da177e4SLinus Torvalds
15811da177e4SLinus Torvalds 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
158211ce99e6SNeilBrown 		   conf->raid_disks - mddev->degraded);
1583ddac7c7eSNeilBrown 	rcu_read_lock();
1584ddac7c7eSNeilBrown 	for (i = 0; i < conf->raid_disks; i++) {
15853cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
15861da177e4SLinus Torvalds 		seq_printf(seq, "%s",
1587ddac7c7eSNeilBrown 			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1588ddac7c7eSNeilBrown 	}
1589ddac7c7eSNeilBrown 	rcu_read_unlock();
15901da177e4SLinus Torvalds 	seq_printf(seq, "]");
15911da177e4SLinus Torvalds }
15921da177e4SLinus Torvalds
1593849674e4SShaohua Li static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
15941da177e4SLinus Torvalds {
15951da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
1596e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
1597423f04d6SNeilBrown 	unsigned long flags;
15981da177e4SLinus Torvalds
15991da177e4SLinus Torvalds 	/*
16001da177e4SLinus Torvalds 	 * If it is not operational, then we have already marked it as dead
16019a567843SGuoqing Jiang 	 * else if it is the last working disk with "fail_last_dev == false",
16029a567843SGuoqing Jiang 	 * ignore the error, let the next level up know.
16031da177e4SLinus Torvalds * else mark the drive as failed 16041da177e4SLinus Torvalds */ 16052e52d449SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 16069a567843SGuoqing Jiang if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev 16074044ba58SNeilBrown && (conf->raid_disks - mddev->degraded) == 1) { 16081da177e4SLinus Torvalds /* 16091da177e4SLinus Torvalds * Don't fail the drive, act as though we were just a 16104044ba58SNeilBrown * normal single drive. 16114044ba58SNeilBrown * However don't try a recovery from this drive as 16124044ba58SNeilBrown * it is very likely to fail. 16131da177e4SLinus Torvalds */ 16145389042fSNeilBrown conf->recovery_disabled = mddev->recovery_disabled; 16152e52d449SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 16161da177e4SLinus Torvalds return; 16174044ba58SNeilBrown } 1618de393cdeSNeilBrown set_bit(Blocked, &rdev->flags); 1619ebda52faSYufen Yu if (test_and_clear_bit(In_sync, &rdev->flags)) 16201da177e4SLinus Torvalds mddev->degraded++; 1621dd00a99eSNeilBrown set_bit(Faulty, &rdev->flags); 1622423f04d6SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 16231da177e4SLinus Torvalds /* 16241da177e4SLinus Torvalds * if recovery is running, make sure it aborts. 16251da177e4SLinus Torvalds */ 1626dfc70645SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 16272953079cSShaohua Li set_mask_bits(&mddev->sb_flags, 0, 16282953079cSShaohua Li BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 16291d41c216SNeilBrown pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n" 1630067032bcSJoe Perches "md/raid1:%s: Operation continuing on %d devices.\n", 16319dd1e2faSNeilBrown mdname(mddev), bdevname(rdev->bdev, b), 16329dd1e2faSNeilBrown mdname(mddev), conf->raid_disks - mddev->degraded); 16331da177e4SLinus Torvalds } 16341da177e4SLinus Torvalds 1635e8096360SNeilBrown static void print_conf(struct r1conf *conf) 16361da177e4SLinus Torvalds { 16371da177e4SLinus Torvalds int i; 16381da177e4SLinus Torvalds 16391d41c216SNeilBrown pr_debug("RAID1 conf printout:\n"); 16401da177e4SLinus Torvalds if (!conf) { 16411d41c216SNeilBrown pr_debug("(!conf)\n"); 16421da177e4SLinus Torvalds return; 16431da177e4SLinus Torvalds } 16441d41c216SNeilBrown pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, 16451da177e4SLinus Torvalds conf->raid_disks); 16461da177e4SLinus Torvalds 1647ddac7c7eSNeilBrown rcu_read_lock(); 16481da177e4SLinus Torvalds for (i = 0; i < conf->raid_disks; i++) { 16491da177e4SLinus Torvalds char b[BDEVNAME_SIZE]; 16503cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1651ddac7c7eSNeilBrown if (rdev) 16521d41c216SNeilBrown pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", 1653ddac7c7eSNeilBrown i, !test_bit(In_sync, &rdev->flags), 1654ddac7c7eSNeilBrown !test_bit(Faulty, &rdev->flags), 1655ddac7c7eSNeilBrown bdevname(rdev->bdev,b)); 16561da177e4SLinus Torvalds } 1657ddac7c7eSNeilBrown rcu_read_unlock(); 16581da177e4SLinus Torvalds } 16591da177e4SLinus Torvalds 1660e8096360SNeilBrown static void close_sync(struct r1conf *conf) 16611da177e4SLinus Torvalds { 1662f6eca2d4SNate Dailey int idx; 1663f6eca2d4SNate Dailey 1664f6eca2d4SNate Dailey for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) { 1665f6eca2d4SNate Dailey _wait_barrier(conf, idx); 1666f6eca2d4SNate Dailey _allow_barrier(conf, idx); 1667f6eca2d4SNate Dailey } 16681da177e4SLinus Torvalds 1669afeee514SKent Overstreet mempool_exit(&conf->r1buf_pool); 16701da177e4SLinus Torvalds } 16711da177e4SLinus Torvalds 
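/*
 * Editor's note (illustrative sketch, not driver code): close_sync() above
 * drains the per-bucket barrier one bucket at a time. A caller that only
 * cares about a single sector would first map it to its bucket with
 * sector_to_idx(), roughly:
 *
 *	int idx = sector_to_idx(sector_nr);	 one of BARRIER_BUCKETS_NR
 *
 *	_wait_barrier(conf, idx);	 wait for resync to leave this bucket
 *	_allow_barrier(conf, idx);	 drop our hold on it again
 */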
1672fd01b88cSNeilBrown static int raid1_spare_active(struct mddev *mddev) 16731da177e4SLinus Torvalds { 16741da177e4SLinus Torvalds int i; 1675e8096360SNeilBrown struct r1conf *conf = mddev->private; 16766b965620SNeilBrown int count = 0; 16776b965620SNeilBrown unsigned long flags; 16781da177e4SLinus Torvalds 16791da177e4SLinus Torvalds /* 16801da177e4SLinus Torvalds * Find all failed disks within the RAID1 configuration 1681ddac7c7eSNeilBrown * and mark them readable. 1682ddac7c7eSNeilBrown * Called under mddev lock, so rcu protection not needed. 1683423f04d6SNeilBrown * device_lock used to avoid races with raid1_end_read_request 1684423f04d6SNeilBrown * which expects 'In_sync' flags and ->degraded to be consistent. 16851da177e4SLinus Torvalds */ 1686423f04d6SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 16871da177e4SLinus Torvalds for (i = 0; i < conf->raid_disks; i++) { 16883cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[i].rdev; 16898c7a2c2bSNeilBrown struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 16908c7a2c2bSNeilBrown if (repl 16911aee41f6SGoldwyn Rodrigues && !test_bit(Candidate, &repl->flags) 16928c7a2c2bSNeilBrown && repl->recovery_offset == MaxSector 16938c7a2c2bSNeilBrown && !test_bit(Faulty, &repl->flags) 16948c7a2c2bSNeilBrown && !test_and_set_bit(In_sync, &repl->flags)) { 16958c7a2c2bSNeilBrown /* replacement has just become active */ 16968c7a2c2bSNeilBrown if (!rdev || 16978c7a2c2bSNeilBrown !test_and_clear_bit(In_sync, &rdev->flags)) 16988c7a2c2bSNeilBrown count++; 16998c7a2c2bSNeilBrown if (rdev) { 17008c7a2c2bSNeilBrown /* Replaced device not technically 17018c7a2c2bSNeilBrown * faulty, but we need to be sure 17028c7a2c2bSNeilBrown * it gets removed and never re-added 17038c7a2c2bSNeilBrown */ 17048c7a2c2bSNeilBrown set_bit(Faulty, &rdev->flags); 17058c7a2c2bSNeilBrown sysfs_notify_dirent_safe( 17068c7a2c2bSNeilBrown rdev->sysfs_state); 17078c7a2c2bSNeilBrown } 17088c7a2c2bSNeilBrown } 1709ddac7c7eSNeilBrown if (rdev 171061e4947cSLukasz Dorau && rdev->recovery_offset == MaxSector 1711ddac7c7eSNeilBrown && !test_bit(Faulty, &rdev->flags) 1712c04be0aaSNeilBrown && !test_and_set_bit(In_sync, &rdev->flags)) { 17136b965620SNeilBrown count++; 1714654e8b5aSJonathan Brassow sysfs_notify_dirent_safe(rdev->sysfs_state); 17151da177e4SLinus Torvalds } 17161da177e4SLinus Torvalds } 17176b965620SNeilBrown mddev->degraded -= count; 17186b965620SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 17191da177e4SLinus Torvalds 17201da177e4SLinus Torvalds print_conf(conf); 17216b965620SNeilBrown return count; 17221da177e4SLinus Torvalds } 17231da177e4SLinus Torvalds 1724fd01b88cSNeilBrown static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) 17251da177e4SLinus Torvalds { 1726e8096360SNeilBrown struct r1conf *conf = mddev->private; 1727199050eaSNeil Brown int err = -EEXIST; 172841158c7eSNeilBrown int mirror = 0; 17290eaf822cSJonathan Brassow struct raid1_info *p; 17306c2fce2eSNeil Brown int first = 0; 173130194636SNeilBrown int last = conf->raid_disks - 1; 17321da177e4SLinus Torvalds 17335389042fSNeilBrown if (mddev->recovery_disabled == conf->recovery_disabled) 17345389042fSNeilBrown return -EBUSY; 17355389042fSNeilBrown 17361501efadSDan Williams if (md_integrity_add_rdev(rdev, mddev)) 17371501efadSDan Williams return -ENXIO; 17381501efadSDan Williams 17396c2fce2eSNeil Brown if (rdev->raid_disk >= 0) 17406c2fce2eSNeil Brown first = last = rdev->raid_disk; 17416c2fce2eSNeil Brown 174270bcecdbSGoldwyn Rodrigues /* 174370bcecdbSGoldwyn 
Rodrigues * find the disk ... but prefer rdev->saved_raid_disk 174470bcecdbSGoldwyn Rodrigues * if possible. 174570bcecdbSGoldwyn Rodrigues */ 174670bcecdbSGoldwyn Rodrigues if (rdev->saved_raid_disk >= 0 && 174770bcecdbSGoldwyn Rodrigues rdev->saved_raid_disk >= first && 17489e753ba9SShaohua Li rdev->saved_raid_disk < conf->raid_disks && 174970bcecdbSGoldwyn Rodrigues conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 175070bcecdbSGoldwyn Rodrigues first = last = rdev->saved_raid_disk; 175170bcecdbSGoldwyn Rodrigues 17527ef449d1SNeilBrown for (mirror = first; mirror <= last; mirror++) { 17537ef449d1SNeilBrown p = conf->mirrors + mirror; 17547ef449d1SNeilBrown if (!p->rdev) { 17559092c02dSJonathan Brassow if (mddev->gendisk) 17568f6c2e4bSMartin K. Petersen disk_stack_limits(mddev->gendisk, rdev->bdev, 17578f6c2e4bSMartin K. Petersen rdev->data_offset << 9); 17581da177e4SLinus Torvalds 17591da177e4SLinus Torvalds p->head_position = 0; 17601da177e4SLinus Torvalds rdev->raid_disk = mirror; 1761199050eaSNeil Brown err = 0; 17626aea114aSNeilBrown /* As all devices are equivalent, we don't need a full recovery 17636aea114aSNeilBrown * if this was recently any drive of the array 17646aea114aSNeilBrown */ 17656aea114aSNeilBrown if (rdev->saved_raid_disk < 0) 176641158c7eSNeilBrown conf->fullsync = 1; 1767d6065f7bSSuzanne Wood rcu_assign_pointer(p->rdev, rdev); 17681da177e4SLinus Torvalds break; 17691da177e4SLinus Torvalds } 17707ef449d1SNeilBrown if (test_bit(WantReplacement, &p->rdev->flags) && 17717ef449d1SNeilBrown p[conf->raid_disks].rdev == NULL) { 17727ef449d1SNeilBrown /* Add this device as a replacement */ 17737ef449d1SNeilBrown clear_bit(In_sync, &rdev->flags); 17747ef449d1SNeilBrown set_bit(Replacement, &rdev->flags); 17757ef449d1SNeilBrown rdev->raid_disk = mirror; 17767ef449d1SNeilBrown err = 0; 17777ef449d1SNeilBrown conf->fullsync = 1; 17787ef449d1SNeilBrown rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); 17797ef449d1SNeilBrown break; 17807ef449d1SNeilBrown } 17817ef449d1SNeilBrown } 17829092c02dSJonathan Brassow if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 17838b904b5bSBart Van Assche blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); 17841da177e4SLinus Torvalds print_conf(conf); 1785199050eaSNeil Brown return err; 17861da177e4SLinus Torvalds } 17871da177e4SLinus Torvalds 1788b8321b68SNeilBrown static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 17891da177e4SLinus Torvalds { 1790e8096360SNeilBrown struct r1conf *conf = mddev->private; 17911da177e4SLinus Torvalds int err = 0; 1792b8321b68SNeilBrown int number = rdev->raid_disk; 17930eaf822cSJonathan Brassow struct raid1_info *p = conf->mirrors + number; 17941da177e4SLinus Torvalds 1795b014f14cSNeilBrown if (rdev != p->rdev) 1796b014f14cSNeilBrown p = conf->mirrors + conf->raid_disks + number; 1797b014f14cSNeilBrown 17981da177e4SLinus Torvalds print_conf(conf); 1799b8321b68SNeilBrown if (rdev == p->rdev) { 1800b2d444d7SNeilBrown if (test_bit(In_sync, &rdev->flags) || 18011da177e4SLinus Torvalds atomic_read(&rdev->nr_pending)) { 18021da177e4SLinus Torvalds err = -EBUSY; 18031da177e4SLinus Torvalds goto abort; 18041da177e4SLinus Torvalds } 1805046abeedSNeilBrown /* Only remove non-faulty devices if recovery 1806dfc70645SNeilBrown * is not possible. 
1807dfc70645SNeilBrown */ 1808dfc70645SNeilBrown if (!test_bit(Faulty, &rdev->flags) && 18095389042fSNeilBrown mddev->recovery_disabled != conf->recovery_disabled && 1810dfc70645SNeilBrown mddev->degraded < conf->raid_disks) { 1811dfc70645SNeilBrown err = -EBUSY; 1812dfc70645SNeilBrown goto abort; 1813dfc70645SNeilBrown } 18141da177e4SLinus Torvalds p->rdev = NULL; 1815d787be40SNeilBrown if (!test_bit(RemoveSynchronized, &rdev->flags)) { 1816fbd568a3SPaul E. McKenney synchronize_rcu(); 18171da177e4SLinus Torvalds if (atomic_read(&rdev->nr_pending)) { 18181da177e4SLinus Torvalds /* lost the race, try later */ 18191da177e4SLinus Torvalds err = -EBUSY; 18201da177e4SLinus Torvalds p->rdev = rdev; 1821ac5e7113SAndre Noll goto abort; 1822d787be40SNeilBrown } 1823d787be40SNeilBrown } 1824d787be40SNeilBrown if (conf->mirrors[conf->raid_disks + number].rdev) { 18258c7a2c2bSNeilBrown /* We just removed a device that is being replaced. 18268c7a2c2bSNeilBrown * Move down the replacement. We drain all IO before 18278c7a2c2bSNeilBrown * doing this to avoid confusion. 18288c7a2c2bSNeilBrown */ 18298c7a2c2bSNeilBrown struct md_rdev *repl = 18308c7a2c2bSNeilBrown conf->mirrors[conf->raid_disks + number].rdev; 1831e2d59925SNeilBrown freeze_array(conf, 0); 18323de59bb9SYufen Yu if (atomic_read(&repl->nr_pending)) { 18333de59bb9SYufen Yu /* It means that some queued IO of retry_list 18343de59bb9SYufen Yu * hold repl. Thus, we cannot set replacement 18353de59bb9SYufen Yu * as NULL, avoiding rdev NULL pointer 18363de59bb9SYufen Yu * dereference in sync_request_write and 18373de59bb9SYufen Yu * handle_write_finished. 18383de59bb9SYufen Yu */ 18393de59bb9SYufen Yu err = -EBUSY; 18403de59bb9SYufen Yu unfreeze_array(conf); 18413de59bb9SYufen Yu goto abort; 18423de59bb9SYufen Yu } 18438c7a2c2bSNeilBrown clear_bit(Replacement, &repl->flags); 18448c7a2c2bSNeilBrown p->rdev = repl; 18458c7a2c2bSNeilBrown conf->mirrors[conf->raid_disks + number].rdev = NULL; 1846e2d59925SNeilBrown unfreeze_array(conf); 1847e5bc9c3cSGuoqing Jiang } 1848e5bc9c3cSGuoqing Jiang 18498c7a2c2bSNeilBrown clear_bit(WantReplacement, &rdev->flags); 1850a91a2785SMartin K. Petersen err = md_integrity_register(mddev); 18511da177e4SLinus Torvalds } 18521da177e4SLinus Torvalds abort: 18531da177e4SLinus Torvalds 18541da177e4SLinus Torvalds print_conf(conf); 18551da177e4SLinus Torvalds return err; 18561da177e4SLinus Torvalds } 18571da177e4SLinus Torvalds 18584246a0b6SChristoph Hellwig static void end_sync_read(struct bio *bio) 18591da177e4SLinus Torvalds { 186098d30c58SMing Lei struct r1bio *r1_bio = get_resync_r1bio(bio); 18611da177e4SLinus Torvalds 18620fc280f6SNeilBrown update_head_pos(r1_bio->read_disk, r1_bio); 1863ba3ae3beSNamhyung Kim 18641da177e4SLinus Torvalds /* 18651da177e4SLinus Torvalds * we have read a block, now it needs to be re-written, 18661da177e4SLinus Torvalds * or re-read if the read failed. 
18671da177e4SLinus Torvalds * We don't do much here, just schedule handling by raid1d 18681da177e4SLinus Torvalds */ 18694e4cbee9SChristoph Hellwig if (!bio->bi_status) 18701da177e4SLinus Torvalds set_bit(R1BIO_Uptodate, &r1_bio->state); 1871d11c171eSNeilBrown 1872d11c171eSNeilBrown if (atomic_dec_and_test(&r1_bio->remaining)) 18731da177e4SLinus Torvalds reschedule_retry(r1_bio); 18741da177e4SLinus Torvalds } 18751da177e4SLinus Torvalds 1876dfcc34c9SNate Dailey static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) 1877dfcc34c9SNate Dailey { 1878dfcc34c9SNate Dailey sector_t sync_blocks = 0; 1879dfcc34c9SNate Dailey sector_t s = r1_bio->sector; 1880dfcc34c9SNate Dailey long sectors_to_go = r1_bio->sectors; 1881dfcc34c9SNate Dailey 1882dfcc34c9SNate Dailey /* make sure these bits don't get cleared. */ 1883dfcc34c9SNate Dailey do { 1884dfcc34c9SNate Dailey md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); 1885dfcc34c9SNate Dailey s += sync_blocks; 1886dfcc34c9SNate Dailey sectors_to_go -= sync_blocks; 1887dfcc34c9SNate Dailey } while (sectors_to_go > 0); 1888dfcc34c9SNate Dailey } 1889dfcc34c9SNate Dailey 1890449808a2SHou Tao static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate) 1891449808a2SHou Tao { 1892449808a2SHou Tao if (atomic_dec_and_test(&r1_bio->remaining)) { 1893449808a2SHou Tao struct mddev *mddev = r1_bio->mddev; 1894449808a2SHou Tao int s = r1_bio->sectors; 1895449808a2SHou Tao 1896449808a2SHou Tao if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 1897449808a2SHou Tao test_bit(R1BIO_WriteError, &r1_bio->state)) 1898449808a2SHou Tao reschedule_retry(r1_bio); 1899449808a2SHou Tao else { 1900449808a2SHou Tao put_buf(r1_bio); 1901449808a2SHou Tao md_done_sync(mddev, s, uptodate); 1902449808a2SHou Tao } 1903449808a2SHou Tao } 1904449808a2SHou Tao } 1905449808a2SHou Tao 19064246a0b6SChristoph Hellwig static void end_sync_write(struct bio *bio) 19071da177e4SLinus Torvalds { 19084e4cbee9SChristoph Hellwig int uptodate = !bio->bi_status; 190998d30c58SMing Lei struct r1bio *r1_bio = get_resync_r1bio(bio); 1910fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 1911e8096360SNeilBrown struct r1conf *conf = mddev->private; 19124367af55SNeilBrown sector_t first_bad; 19134367af55SNeilBrown int bad_sectors; 1914854abd75SNeilBrown struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; 1915ba3ae3beSNamhyung Kim 19166b1117d5SNeilBrown if (!uptodate) { 1917dfcc34c9SNate Dailey abort_sync_write(mddev, r1_bio); 1918854abd75SNeilBrown set_bit(WriteErrorSeen, &rdev->flags); 1919854abd75SNeilBrown if (!test_and_set_bit(WantReplacement, &rdev->flags)) 192019d67169SNeilBrown set_bit(MD_RECOVERY_NEEDED, & 192119d67169SNeilBrown mddev->recovery); 1922d8f05d29SNeilBrown set_bit(R1BIO_WriteError, &r1_bio->state); 1923854abd75SNeilBrown } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 19243a9f28a5SNeilBrown &first_bad, &bad_sectors) && 19253a9f28a5SNeilBrown !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, 19263a9f28a5SNeilBrown r1_bio->sector, 19273a9f28a5SNeilBrown r1_bio->sectors, 19283a9f28a5SNeilBrown &first_bad, &bad_sectors) 19293a9f28a5SNeilBrown ) 19304367af55SNeilBrown set_bit(R1BIO_MadeGood, &r1_bio->state); 1931e3b9703eSNeilBrown 1932449808a2SHou Tao put_sync_write_buf(r1_bio, uptodate); 19334367af55SNeilBrown } 19341da177e4SLinus Torvalds 19353cb03002SNeilBrown static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, 1936d8f05d29SNeilBrown int sectors, struct page *page, int rw) 1937d8f05d29SNeilBrown { 1938796a5cf0SMike 
Christie if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 1939d8f05d29SNeilBrown /* success */ 1940d8f05d29SNeilBrown return 1; 194119d67169SNeilBrown if (rw == WRITE) { 1942d8f05d29SNeilBrown set_bit(WriteErrorSeen, &rdev->flags); 194319d67169SNeilBrown if (!test_and_set_bit(WantReplacement, 194419d67169SNeilBrown &rdev->flags)) 194519d67169SNeilBrown set_bit(MD_RECOVERY_NEEDED, & 194619d67169SNeilBrown rdev->mddev->recovery); 194719d67169SNeilBrown } 1948d8f05d29SNeilBrown /* need to record an error - either for the block or the device */ 1949d8f05d29SNeilBrown if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 1950d8f05d29SNeilBrown md_error(rdev->mddev, rdev); 1951d8f05d29SNeilBrown return 0; 1952d8f05d29SNeilBrown } 1953d8f05d29SNeilBrown 19549f2c9d12SNeilBrown static int fix_sync_read_error(struct r1bio *r1_bio) 19551da177e4SLinus Torvalds { 1956a68e5870SNeilBrown /* Try some synchronous reads of other devices to get 195769382e85SNeilBrown * good data, much like with normal read errors. Only 1958ddac7c7eSNeilBrown * read into the pages we already have so we don't 195969382e85SNeilBrown * need to re-issue the read request. 196069382e85SNeilBrown * We don't need to freeze the array, because being in an 196169382e85SNeilBrown * active sync request, there is no normal IO, and 196269382e85SNeilBrown * no overlapping syncs. 196306f60385SNeilBrown * We don't need to check is_badblock() again as we 196406f60385SNeilBrown * made sure that anything with a bad block in range 196506f60385SNeilBrown * will have bi_end_io clear. 19661da177e4SLinus Torvalds */ 1967fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 1968e8096360SNeilBrown struct r1conf *conf = mddev->private; 1969a68e5870SNeilBrown struct bio *bio = r1_bio->bios[r1_bio->read_disk]; 197044cf0f4dSMing Lei struct page **pages = get_resync_pages(bio)->pages; 197169382e85SNeilBrown sector_t sect = r1_bio->sector; 197269382e85SNeilBrown int sectors = r1_bio->sectors; 197369382e85SNeilBrown int idx = 0; 19742e52d449SNeilBrown struct md_rdev *rdev; 19752e52d449SNeilBrown 19762e52d449SNeilBrown rdev = conf->mirrors[r1_bio->read_disk].rdev; 19772e52d449SNeilBrown if (test_bit(FailFast, &rdev->flags)) { 19782e52d449SNeilBrown /* Don't try recovering from here - just fail it 19792e52d449SNeilBrown * ... 
unless it is the last working device of course */
19802e52d449SNeilBrown 		md_error(mddev, rdev);
19812e52d449SNeilBrown 		if (test_bit(Faulty, &rdev->flags))
19822e52d449SNeilBrown 			/* Don't try to read from here, but make sure
19832e52d449SNeilBrown 			 * put_buf does its thing
19842e52d449SNeilBrown 			 */
19852e52d449SNeilBrown 			bio->bi_end_io = end_sync_write;
19862e52d449SNeilBrown 	}
198769382e85SNeilBrown
198869382e85SNeilBrown 	while (sectors) {
198969382e85SNeilBrown 		int s = sectors;
199069382e85SNeilBrown 		int d = r1_bio->read_disk;
199169382e85SNeilBrown 		int success = 0;
199278d7f5f7SNeilBrown 		int start;
199369382e85SNeilBrown
199469382e85SNeilBrown 		if (s > (PAGE_SIZE>>9))
199569382e85SNeilBrown 			s = PAGE_SIZE >> 9;
199669382e85SNeilBrown 		do {
199769382e85SNeilBrown 			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1998ddac7c7eSNeilBrown 				/* No rcu protection needed here; devices
1999ddac7c7eSNeilBrown 				 * can only be removed when no resync is
2000ddac7c7eSNeilBrown 				 * active, and resync is currently active
2001ddac7c7eSNeilBrown 				 */
200269382e85SNeilBrown 				rdev = conf->mirrors[d].rdev;
20039d3d8011SNamhyung Kim 				if (sync_page_io(rdev, sect, s<<9,
200444cf0f4dSMing Lei 						 pages[idx],
2005796a5cf0SMike Christie 						 REQ_OP_READ, 0, false)) {
200669382e85SNeilBrown 					success = 1;
200769382e85SNeilBrown 					break;
200869382e85SNeilBrown 				}
200969382e85SNeilBrown 			}
201069382e85SNeilBrown 			d++;
20118f19ccb2SNeilBrown 			if (d == conf->raid_disks * 2)
201269382e85SNeilBrown 				d = 0;
201369382e85SNeilBrown 		} while (!success && d != r1_bio->read_disk);
201469382e85SNeilBrown
201578d7f5f7SNeilBrown 		if (!success) {
201678d7f5f7SNeilBrown 			char b[BDEVNAME_SIZE];
20173a9f28a5SNeilBrown 			int abort = 0;
20183a9f28a5SNeilBrown 			/* Cannot read from anywhere, this block is lost.
20193a9f28a5SNeilBrown 			 * Record a bad block on each device. If that doesn't
20203a9f28a5SNeilBrown 			 * work just disable and interrupt the recovery.
20213a9f28a5SNeilBrown 			 * Don't fail devices as that won't really help.
20223a9f28a5SNeilBrown */ 20231d41c216SNeilBrown pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 202474d46992SChristoph Hellwig mdname(mddev), bio_devname(bio, b), 202578d7f5f7SNeilBrown (unsigned long long)r1_bio->sector); 20268f19ccb2SNeilBrown for (d = 0; d < conf->raid_disks * 2; d++) { 20273a9f28a5SNeilBrown rdev = conf->mirrors[d].rdev; 20283a9f28a5SNeilBrown if (!rdev || test_bit(Faulty, &rdev->flags)) 20293a9f28a5SNeilBrown continue; 20303a9f28a5SNeilBrown if (!rdev_set_badblocks(rdev, sect, s, 0)) 20313a9f28a5SNeilBrown abort = 1; 20323a9f28a5SNeilBrown } 20333a9f28a5SNeilBrown if (abort) { 2034d890fa2bSNeilBrown conf->recovery_disabled = 2035d890fa2bSNeilBrown mddev->recovery_disabled; 20363a9f28a5SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 203778d7f5f7SNeilBrown md_done_sync(mddev, r1_bio->sectors, 0); 203878d7f5f7SNeilBrown put_buf(r1_bio); 203978d7f5f7SNeilBrown return 0; 204078d7f5f7SNeilBrown } 20413a9f28a5SNeilBrown /* Try next page */ 20423a9f28a5SNeilBrown sectors -= s; 20433a9f28a5SNeilBrown sect += s; 20443a9f28a5SNeilBrown idx++; 20453a9f28a5SNeilBrown continue; 20463a9f28a5SNeilBrown } 204778d7f5f7SNeilBrown 204878d7f5f7SNeilBrown start = d; 204969382e85SNeilBrown /* write it back and re-read */ 205069382e85SNeilBrown while (d != r1_bio->read_disk) { 205169382e85SNeilBrown if (d == 0) 20528f19ccb2SNeilBrown d = conf->raid_disks * 2; 205369382e85SNeilBrown d--; 205469382e85SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 205569382e85SNeilBrown continue; 205669382e85SNeilBrown rdev = conf->mirrors[d].rdev; 2057d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 205844cf0f4dSMing Lei pages[idx], 2059d8f05d29SNeilBrown WRITE) == 0) { 206078d7f5f7SNeilBrown r1_bio->bios[d]->bi_end_io = NULL; 206178d7f5f7SNeilBrown rdev_dec_pending(rdev, mddev); 20629d3d8011SNamhyung Kim } 2063097426f6SNeilBrown } 2064097426f6SNeilBrown d = start; 2065097426f6SNeilBrown while (d != r1_bio->read_disk) { 2066097426f6SNeilBrown if (d == 0) 20678f19ccb2SNeilBrown d = conf->raid_disks * 2; 2068097426f6SNeilBrown d--; 2069097426f6SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 2070097426f6SNeilBrown continue; 2071097426f6SNeilBrown rdev = conf->mirrors[d].rdev; 2072d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 207344cf0f4dSMing Lei pages[idx], 2074d8f05d29SNeilBrown READ) != 0) 20759d3d8011SNamhyung Kim atomic_add(s, &rdev->corrected_errors); 207669382e85SNeilBrown } 207769382e85SNeilBrown sectors -= s; 207869382e85SNeilBrown sect += s; 207969382e85SNeilBrown idx ++; 208069382e85SNeilBrown } 208178d7f5f7SNeilBrown set_bit(R1BIO_Uptodate, &r1_bio->state); 20824e4cbee9SChristoph Hellwig bio->bi_status = 0; 2083a68e5870SNeilBrown return 1; 208469382e85SNeilBrown } 2085d11c171eSNeilBrown 2086c95e6385SNeilBrown static void process_checks(struct r1bio *r1_bio) 2087a68e5870SNeilBrown { 2088a68e5870SNeilBrown /* We have read all readable devices. If we haven't 2089a68e5870SNeilBrown * got the block, then there is no hope left. 2090a68e5870SNeilBrown * If we have, then we want to do a comparison 2091a68e5870SNeilBrown * and skip the write if everything is the same. 
2092a68e5870SNeilBrown * If any blocks failed to read, then we need to 2093a68e5870SNeilBrown * attempt an over-write 2094a68e5870SNeilBrown */ 2095fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 2096e8096360SNeilBrown struct r1conf *conf = mddev->private; 2097a68e5870SNeilBrown int primary; 2098a68e5870SNeilBrown int i; 2099f4380a91Smajianpeng int vcnt; 2100a68e5870SNeilBrown 210130bc9b53SNeilBrown /* Fix variable parts of all bios */ 210230bc9b53SNeilBrown vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); 210330bc9b53SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 21044e4cbee9SChristoph Hellwig blk_status_t status; 210530bc9b53SNeilBrown struct bio *b = r1_bio->bios[i]; 210698d30c58SMing Lei struct resync_pages *rp = get_resync_pages(b); 210730bc9b53SNeilBrown if (b->bi_end_io != end_sync_read) 210830bc9b53SNeilBrown continue; 21094246a0b6SChristoph Hellwig /* fixup the bio for reuse, but preserve errno */ 21104e4cbee9SChristoph Hellwig status = b->bi_status; 211130bc9b53SNeilBrown bio_reset(b); 21124e4cbee9SChristoph Hellwig b->bi_status = status; 21134f024f37SKent Overstreet b->bi_iter.bi_sector = r1_bio->sector + 211430bc9b53SNeilBrown conf->mirrors[i].rdev->data_offset; 211574d46992SChristoph Hellwig bio_set_dev(b, conf->mirrors[i].rdev->bdev); 211630bc9b53SNeilBrown b->bi_end_io = end_sync_read; 211798d30c58SMing Lei rp->raid_bio = r1_bio; 211898d30c58SMing Lei b->bi_private = rp; 211930bc9b53SNeilBrown 2120fb0eb5dfSMing Lei /* initialize bvec table again */ 2121fb0eb5dfSMing Lei md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); 212230bc9b53SNeilBrown } 21238f19ccb2SNeilBrown for (primary = 0; primary < conf->raid_disks * 2; primary++) 2124a68e5870SNeilBrown if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 21254e4cbee9SChristoph Hellwig !r1_bio->bios[primary]->bi_status) { 2126a68e5870SNeilBrown r1_bio->bios[primary]->bi_end_io = NULL; 2127a68e5870SNeilBrown rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 2128a68e5870SNeilBrown break; 2129a68e5870SNeilBrown } 2130a68e5870SNeilBrown r1_bio->read_disk = primary; 21318f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 21322b070cfeSChristoph Hellwig int j = 0; 2133a68e5870SNeilBrown struct bio *pbio = r1_bio->bios[primary]; 2134a68e5870SNeilBrown struct bio *sbio = r1_bio->bios[i]; 21354e4cbee9SChristoph Hellwig blk_status_t status = sbio->bi_status; 213644cf0f4dSMing Lei struct page **ppages = get_resync_pages(pbio)->pages; 213744cf0f4dSMing Lei struct page **spages = get_resync_pages(sbio)->pages; 213860928a91SMing Lei struct bio_vec *bi; 21398fc04e6eSMing Lei int page_len[RESYNC_PAGES] = { 0 }; 21406dc4f100SMing Lei struct bvec_iter_all iter_all; 214178d7f5f7SNeilBrown 21422aabaa65SKent Overstreet if (sbio->bi_end_io != end_sync_read) 214378d7f5f7SNeilBrown continue; 21444246a0b6SChristoph Hellwig /* Now we can 'fixup' the error value */ 21454e4cbee9SChristoph Hellwig sbio->bi_status = 0; 2146a68e5870SNeilBrown 21472b070cfeSChristoph Hellwig bio_for_each_segment_all(bi, sbio, iter_all) 21482b070cfeSChristoph Hellwig page_len[j++] = bi->bv_len; 214960928a91SMing Lei 21504e4cbee9SChristoph Hellwig if (!status) { 2151a68e5870SNeilBrown for (j = vcnt; j-- ; ) { 215244cf0f4dSMing Lei if (memcmp(page_address(ppages[j]), 215344cf0f4dSMing Lei page_address(spages[j]), 215460928a91SMing Lei page_len[j])) 2155a68e5870SNeilBrown break; 2156a68e5870SNeilBrown } 2157a68e5870SNeilBrown } else 2158a68e5870SNeilBrown j = 0; 2159a68e5870SNeilBrown if (j >= 0) 21607f7583d4SJianpeng Ma 
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2161a68e5870SNeilBrown 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
21624e4cbee9SChristoph Hellwig 			      && !status)) {
216378d7f5f7SNeilBrown 			/* No need to write to this device. */
2164a68e5870SNeilBrown 			sbio->bi_end_io = NULL;
2165a68e5870SNeilBrown 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
216678d7f5f7SNeilBrown 			continue;
216778d7f5f7SNeilBrown 		}
2168d3b45c2aSKent Overstreet
2169d3b45c2aSKent Overstreet 		bio_copy_data(sbio, pbio);
2170a68e5870SNeilBrown 	}
2171a68e5870SNeilBrown }
2172a68e5870SNeilBrown
21739f2c9d12SNeilBrown static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2174a68e5870SNeilBrown {
2175e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
2176a68e5870SNeilBrown 	int i;
21778f19ccb2SNeilBrown 	int disks = conf->raid_disks * 2;
2178037d2ff6SGuoqing Jiang 	struct bio *wbio;
2179a68e5870SNeilBrown
2180a68e5870SNeilBrown 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2181a68e5870SNeilBrown 		/* ouch - failed to read all of that. */
2182a68e5870SNeilBrown 		if (!fix_sync_read_error(r1_bio))
2183a68e5870SNeilBrown 			return;
21847ca78d57SNeilBrown
21857ca78d57SNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2186c95e6385SNeilBrown 		process_checks(r1_bio);
2187c95e6385SNeilBrown
2188d11c171eSNeilBrown 	/*
2189d11c171eSNeilBrown 	 * schedule writes
2190d11c171eSNeilBrown 	 */
21911da177e4SLinus Torvalds 	atomic_set(&r1_bio->remaining, 1);
21921da177e4SLinus Torvalds 	for (i = 0; i < disks; i++) {
21931da177e4SLinus Torvalds 		wbio = r1_bio->bios[i];
21943e198f78SNeilBrown 		if (wbio->bi_end_io == NULL ||
21953e198f78SNeilBrown 		    (wbio->bi_end_io == end_sync_read &&
21963e198f78SNeilBrown 		     (i == r1_bio->read_disk ||
21973e198f78SNeilBrown 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
21981da177e4SLinus Torvalds 			continue;
2199dfcc34c9SNate Dailey 		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2200dfcc34c9SNate Dailey 			abort_sync_write(mddev, r1_bio);
22010c9d5b12SNeilBrown 			continue;
2202dfcc34c9SNate Dailey 		}
22031da177e4SLinus Torvalds
2204796a5cf0SMike Christie 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2205212e7eb7SNeilBrown 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2206212e7eb7SNeilBrown 			wbio->bi_opf |= MD_FAILFAST;
2207212e7eb7SNeilBrown
22083e198f78SNeilBrown 		wbio->bi_end_io = end_sync_write;
22091da177e4SLinus Torvalds 		atomic_inc(&r1_bio->remaining);
2210aa8b57aaSKent Overstreet 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2211191ea9b2SNeilBrown
2212ed00aabdSChristoph Hellwig 		submit_bio_noacct(wbio);
22131da177e4SLinus Torvalds 	}
22141da177e4SLinus Torvalds
2215449808a2SHou Tao 	put_sync_write_buf(r1_bio, 1);
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds
22181da177e4SLinus Torvalds /*
22191da177e4SLinus Torvalds  * This is a kernel thread which:
22201da177e4SLinus Torvalds  *
22211da177e4SLinus Torvalds  *	1. Retries failed read operations on working mirrors.
22221da177e4SLinus Torvalds  *	2. Updates the raid superblock when problems are encountered.
2223d2eb35acSNeilBrown  *	3. Performs writes following reads for array synchronising.
22241da177e4SLinus Torvalds */ 22251da177e4SLinus Torvalds 2226e8096360SNeilBrown static void fix_read_error(struct r1conf *conf, int read_disk, 2227867868fbSNeilBrown sector_t sect, int sectors) 2228867868fbSNeilBrown { 2229fd01b88cSNeilBrown struct mddev *mddev = conf->mddev; 2230867868fbSNeilBrown while(sectors) { 2231867868fbSNeilBrown int s = sectors; 2232867868fbSNeilBrown int d = read_disk; 2233867868fbSNeilBrown int success = 0; 2234867868fbSNeilBrown int start; 22353cb03002SNeilBrown struct md_rdev *rdev; 2236867868fbSNeilBrown 2237867868fbSNeilBrown if (s > (PAGE_SIZE>>9)) 2238867868fbSNeilBrown s = PAGE_SIZE >> 9; 2239867868fbSNeilBrown 2240867868fbSNeilBrown do { 2241d2eb35acSNeilBrown sector_t first_bad; 2242d2eb35acSNeilBrown int bad_sectors; 2243d2eb35acSNeilBrown 2244707a6a42SNeilBrown rcu_read_lock(); 2245707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2246867868fbSNeilBrown if (rdev && 2247da8840a7Smajianpeng (test_bit(In_sync, &rdev->flags) || 2248da8840a7Smajianpeng (!test_bit(Faulty, &rdev->flags) && 2249da8840a7Smajianpeng rdev->recovery_offset >= sect + s)) && 2250d2eb35acSNeilBrown is_badblock(rdev, sect, s, 2251707a6a42SNeilBrown &first_bad, &bad_sectors) == 0) { 2252707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2253707a6a42SNeilBrown rcu_read_unlock(); 2254707a6a42SNeilBrown if (sync_page_io(rdev, sect, s<<9, 2255796a5cf0SMike Christie conf->tmppage, REQ_OP_READ, 0, false)) 2256867868fbSNeilBrown success = 1; 2257707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2258707a6a42SNeilBrown if (success) 2259707a6a42SNeilBrown break; 2260707a6a42SNeilBrown } else 2261707a6a42SNeilBrown rcu_read_unlock(); 2262867868fbSNeilBrown d++; 22638f19ccb2SNeilBrown if (d == conf->raid_disks * 2) 2264867868fbSNeilBrown d = 0; 2265867868fbSNeilBrown } while (!success && d != read_disk); 2266867868fbSNeilBrown 2267867868fbSNeilBrown if (!success) { 2268d8f05d29SNeilBrown /* Cannot read from anywhere - mark it bad */ 22693cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[read_disk].rdev; 2270d8f05d29SNeilBrown if (!rdev_set_badblocks(rdev, sect, s, 0)) 2271d8f05d29SNeilBrown md_error(mddev, rdev); 2272867868fbSNeilBrown break; 2273867868fbSNeilBrown } 2274867868fbSNeilBrown /* write it back and re-read */ 2275867868fbSNeilBrown start = d; 2276867868fbSNeilBrown while (d != read_disk) { 2277867868fbSNeilBrown if (d==0) 22788f19ccb2SNeilBrown d = conf->raid_disks * 2; 2279867868fbSNeilBrown d--; 2280707a6a42SNeilBrown rcu_read_lock(); 2281707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2282867868fbSNeilBrown if (rdev && 2283707a6a42SNeilBrown !test_bit(Faulty, &rdev->flags)) { 2284707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2285707a6a42SNeilBrown rcu_read_unlock(); 2286d8f05d29SNeilBrown r1_sync_page_io(rdev, sect, s, 2287d8f05d29SNeilBrown conf->tmppage, WRITE); 2288707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2289707a6a42SNeilBrown } else 2290707a6a42SNeilBrown rcu_read_unlock(); 2291867868fbSNeilBrown } 2292867868fbSNeilBrown d = start; 2293867868fbSNeilBrown while (d != read_disk) { 2294867868fbSNeilBrown char b[BDEVNAME_SIZE]; 2295867868fbSNeilBrown if (d==0) 22968f19ccb2SNeilBrown d = conf->raid_disks * 2; 2297867868fbSNeilBrown d--; 2298707a6a42SNeilBrown rcu_read_lock(); 2299707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2300867868fbSNeilBrown if (rdev && 2301b8cb6b4cSNeilBrown !test_bit(Faulty, &rdev->flags)) { 2302707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2303707a6a42SNeilBrown rcu_read_unlock(); 
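				/*
				 * Editor's note (illustrative): this is the
				 * reference pattern used throughout this
				 * function - pin the rdev under RCU before
				 * issuing I/O outside the read-side lock:
				 *
				 *	rcu_read_lock();
				 *	rdev = rcu_dereference(conf->mirrors[d].rdev);
				 *	if (rdev)
				 *		atomic_inc(&rdev->nr_pending);
				 *	rcu_read_unlock();
				 *	... sync_page_io() / r1_sync_page_io() ...
				 *	rdev_dec_pending(rdev, mddev);
				 */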
23229f2c9d12SNeilBrown static int narrow_write_error(struct r1bio *r1_bio, int i)
2323cd5ff9a1SNeilBrown {
2324fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev;
2325e8096360SNeilBrown struct r1conf *conf = mddev->private;
23263cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[i].rdev;
2327cd5ff9a1SNeilBrown 
2328cd5ff9a1SNeilBrown /* bio has the data to be written to device 'i' where
2329cd5ff9a1SNeilBrown  * we just recently had a write error.
2330cd5ff9a1SNeilBrown  * We repeatedly clone the bio and trim down to one block,
2331cd5ff9a1SNeilBrown  * then try the write.  Where the write fails we record
2332cd5ff9a1SNeilBrown  * a bad block.
2333cd5ff9a1SNeilBrown  * It is conceivable that the bio doesn't exactly align with
2334cd5ff9a1SNeilBrown  * blocks.  We must handle this somehow.
2335cd5ff9a1SNeilBrown  *
2336cd5ff9a1SNeilBrown  * We currently own a reference on the rdev.
2337cd5ff9a1SNeilBrown  */
2338cd5ff9a1SNeilBrown 
2339cd5ff9a1SNeilBrown int block_sectors;
2340cd5ff9a1SNeilBrown sector_t sector;
2341cd5ff9a1SNeilBrown int sectors;
2342cd5ff9a1SNeilBrown int sect_to_write = r1_bio->sectors;
2343cd5ff9a1SNeilBrown int ok = 1;
2344cd5ff9a1SNeilBrown 
2345cd5ff9a1SNeilBrown if (rdev->badblocks.shift < 0)
2346cd5ff9a1SNeilBrown return 0;
2347cd5ff9a1SNeilBrown 
2348ab713cdcSNate Dailey block_sectors = roundup(1 << rdev->badblocks.shift,
2349ab713cdcSNate Dailey bdev_logical_block_size(rdev->bdev) >> 9);
2350cd5ff9a1SNeilBrown sector = r1_bio->sector;
2351cd5ff9a1SNeilBrown sectors = ((sector + block_sectors)
2352cd5ff9a1SNeilBrown & ~(sector_t)(block_sectors - 1))
2353cd5ff9a1SNeilBrown - sector;
2354cd5ff9a1SNeilBrown 
2355cd5ff9a1SNeilBrown while (sect_to_write) {
2356cd5ff9a1SNeilBrown struct bio *wbio;
2357cd5ff9a1SNeilBrown if (sectors > sect_to_write)
2358cd5ff9a1SNeilBrown sectors = sect_to_write;
2359cd5ff9a1SNeilBrown /* Write at 'sector' for 'sectors' */
2360cd5ff9a1SNeilBrown 
2361b783863fSKent Overstreet if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2362841c1316SMing Lei wbio = bio_clone_fast(r1_bio->behind_master_bio,
2363841c1316SMing Lei GFP_NOIO,
2364afeee514SKent Overstreet &mddev->bio_set);
2365b783863fSKent Overstreet } else {
2366d7a10308SMing Lei wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2367afeee514SKent Overstreet &mddev->bio_set);
2368b783863fSKent Overstreet }
2369b783863fSKent Overstreet 
2370796a5cf0SMike Christie bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
23714f024f37SKent Overstreet wbio->bi_iter.bi_sector = r1_bio->sector;
23724f024f37SKent Overstreet wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2373cd5ff9a1SNeilBrown 
23746678d83fSKent Overstreet bio_trim(wbio, sector - r1_bio->sector, sectors);
23754f024f37SKent Overstreet wbio->bi_iter.bi_sector += rdev->data_offset;
237674d46992SChristoph Hellwig bio_set_dev(wbio, rdev->bdev);
23774e49ea4aSMike Christie 
23784e49ea4aSMike Christie if (submit_bio_wait(wbio) < 0)
2379cd5ff9a1SNeilBrown /* failure! */
2380cd5ff9a1SNeilBrown ok = rdev_set_badblocks(rdev, sector,
2381cd5ff9a1SNeilBrown sectors, 0)
2382cd5ff9a1SNeilBrown && ok;
2383cd5ff9a1SNeilBrown 
2384cd5ff9a1SNeilBrown bio_put(wbio);
2385cd5ff9a1SNeilBrown sect_to_write -= sectors;
2386cd5ff9a1SNeilBrown sector += sectors;
2387cd5ff9a1SNeilBrown sectors = block_sectors;
2388cd5ff9a1SNeilBrown }
2389cd5ff9a1SNeilBrown return ok;
2390cd5ff9a1SNeilBrown }
2391cd5ff9a1SNeilBrown 
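/*
 * Illustrative sketch (not part of the driver): the first chunk written
 * by narrow_write_error() is trimmed so that every later chunk starts on
 * a bad-block-granularity boundary.  With block_sectors = 8 and a bio
 * starting at sector 21 (values chosen purely for illustration):
 *
 *	sector_t sector = 21;
 *	int sectors = ((sector + 8) & ~(sector_t)(8 - 1)) - sector;
 *	// (29 & ~7) - 21 = 24 - 21 = 3 sectors in the first chunk;
 *	// the loop then continues at sector 24 in full chunks of 8.
 *
 * The mask trick relies on block_sectors being a power of two, which
 * holds because both 1 << badblocks.shift and the logical block size
 * are powers of two, so their roundup() is simply the larger of the two.
 */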
2392e8096360SNeilBrown static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
239362096bceSNeilBrown {
239462096bceSNeilBrown int m;
239562096bceSNeilBrown int s = r1_bio->sectors;
23968f19ccb2SNeilBrown for (m = 0; m < conf->raid_disks * 2 ; m++) {
23973cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[m].rdev;
239862096bceSNeilBrown struct bio *bio = r1_bio->bios[m];
239962096bceSNeilBrown if (bio->bi_end_io == NULL)
240062096bceSNeilBrown continue;
24014e4cbee9SChristoph Hellwig if (!bio->bi_status &&
240262096bceSNeilBrown test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2403c6563a8cSNeilBrown rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
240462096bceSNeilBrown }
24054e4cbee9SChristoph Hellwig if (bio->bi_status &&
240662096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state)) {
240762096bceSNeilBrown if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
240862096bceSNeilBrown md_error(conf->mddev, rdev);
240962096bceSNeilBrown }
241062096bceSNeilBrown }
241162096bceSNeilBrown put_buf(r1_bio);
241262096bceSNeilBrown md_done_sync(conf->mddev, s, 1);
241362096bceSNeilBrown }
241462096bceSNeilBrown 
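/*
 * Illustrative summary (not part of the driver) of the per-device
 * outcomes in handle_sync_write_finished():
 *
 *	bio outcome	r1_bio state		action taken
 *	-----------	------------		------------
 *	success		R1BIO_MadeGood		rdev_clear_badblocks()
 *	failure		R1BIO_WriteError	rdev_set_badblocks(), and
 *						md_error() if that fails
 *
 * Either way the resync buffer is returned with put_buf() and the sync
 * window is advanced via md_done_sync().
 */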
2415e8096360SNeilBrown static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
241662096bceSNeilBrown {
2417fd76863eScolyli@suse.de int m, idx;
241855ce74d4SNeilBrown bool fail = false;
2419fd76863eScolyli@suse.de 
24208f19ccb2SNeilBrown for (m = 0; m < conf->raid_disks * 2 ; m++)
242162096bceSNeilBrown if (r1_bio->bios[m] == IO_MADE_GOOD) {
24223cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[m].rdev;
242362096bceSNeilBrown rdev_clear_badblocks(rdev,
242462096bceSNeilBrown r1_bio->sector,
2425c6563a8cSNeilBrown r1_bio->sectors, 0);
242662096bceSNeilBrown rdev_dec_pending(rdev, conf->mddev);
242762096bceSNeilBrown } else if (r1_bio->bios[m] != NULL) {
242862096bceSNeilBrown /* This drive got a write error.  We need to
242962096bceSNeilBrown  * narrow down and record precise write
243062096bceSNeilBrown  * errors.
243162096bceSNeilBrown  */
243255ce74d4SNeilBrown fail = true;
243362096bceSNeilBrown if (!narrow_write_error(r1_bio, m)) {
243462096bceSNeilBrown md_error(conf->mddev,
243562096bceSNeilBrown conf->mirrors[m].rdev);
243662096bceSNeilBrown /* an I/O failed, we can't clear the bitmap */
243762096bceSNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state);
243862096bceSNeilBrown }
243962096bceSNeilBrown rdev_dec_pending(conf->mirrors[m].rdev,
244062096bceSNeilBrown conf->mddev);
244162096bceSNeilBrown }
244255ce74d4SNeilBrown if (fail) {
244355ce74d4SNeilBrown spin_lock_irq(&conf->device_lock);
244455ce74d4SNeilBrown list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2445fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector);
2446824e47daScolyli@suse.de atomic_inc(&conf->nr_queued[idx]);
244755ce74d4SNeilBrown spin_unlock_irq(&conf->device_lock);
2448824e47daScolyli@suse.de /*
2449824e47daScolyli@suse.de  * In case freeze_array() is waiting for condition
2450824e47daScolyli@suse.de  * get_unqueued_pending() == extra to be true.
2451824e47daScolyli@suse.de  */
2452824e47daScolyli@suse.de wake_up(&conf->wait_barrier);
245355ce74d4SNeilBrown md_wakeup_thread(conf->mddev->thread);
2454bd8688a1SNeilBrown } else {
2455bd8688a1SNeilBrown if (test_bit(R1BIO_WriteError, &r1_bio->state))
2456bd8688a1SNeilBrown close_write(r1_bio);
245762096bceSNeilBrown raid_end_bio_io(r1_bio);
245862096bceSNeilBrown }
2459bd8688a1SNeilBrown }
246062096bceSNeilBrown 
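/*
 * Illustrative note (not part of the driver): when any leg of the write
 * failed (fail == true), the r1_bio is not completed here.  It is parked
 * on conf->bio_end_io_list and nr_queued[idx] is bumped so that
 * freeze_array(), which waits for get_unqueued_pending() == extra, still
 * sees consistent counters; raid1d() completes the bio later, once
 * MD_SB_CHANGE_PENDING has cleared, i.e. after the bad-block metadata
 * recorded by narrow_write_error() is safely on disk.
 */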
2461e8096360SNeilBrown static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
246262096bceSNeilBrown {
2463fd01b88cSNeilBrown struct mddev *mddev = conf->mddev;
246462096bceSNeilBrown struct bio *bio;
24653cb03002SNeilBrown struct md_rdev *rdev;
246662096bceSNeilBrown 
246762096bceSNeilBrown clear_bit(R1BIO_ReadError, &r1_bio->state);
246862096bceSNeilBrown /* we got a read error. Maybe the drive is bad.  Maybe just
246962096bceSNeilBrown  * the block and we can fix it.
247062096bceSNeilBrown  * We freeze all other IO, and try reading the block from
247162096bceSNeilBrown  * other devices.  When we find one, we re-write
247262096bceSNeilBrown  * and check that it fixes the read error.
247362096bceSNeilBrown  * This is all done synchronously while the array is
247462096bceSNeilBrown  * frozen.
247562096bceSNeilBrown  */
24767449f699STomasz Majchrzak 
24777449f699STomasz Majchrzak bio = r1_bio->bios[r1_bio->read_disk];
24787449f699STomasz Majchrzak bio_put(bio);
24797449f699STomasz Majchrzak r1_bio->bios[r1_bio->read_disk] = NULL;
24807449f699STomasz Majchrzak 
24812e52d449SNeilBrown rdev = conf->mirrors[r1_bio->read_disk].rdev;
24822e52d449SNeilBrown if (mddev->ro == 0
24832e52d449SNeilBrown && !test_bit(FailFast, &rdev->flags)) {
2484e2d59925SNeilBrown freeze_array(conf, 1);
248562096bceSNeilBrown fix_read_error(conf, r1_bio->read_disk,
248662096bceSNeilBrown r1_bio->sector, r1_bio->sectors);
248762096bceSNeilBrown unfreeze_array(conf);
2488b33d1062SGioh Kim } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2489b33d1062SGioh Kim md_error(mddev, rdev);
24907449f699STomasz Majchrzak } else {
24917449f699STomasz Majchrzak r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
24927449f699STomasz Majchrzak }
24937449f699STomasz Majchrzak 
24942e52d449SNeilBrown rdev_dec_pending(rdev, conf->mddev);
2495689389a0SNeilBrown allow_barrier(conf, r1_bio->sector);
2496689389a0SNeilBrown bio = r1_bio->master_bio;
249762096bceSNeilBrown 
2498689389a0SNeilBrown /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2499689389a0SNeilBrown r1_bio->state = 0;
2500689389a0SNeilBrown raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2501109e3765SNeilBrown }
250262096bceSNeilBrown 
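/*
 * Illustrative decision tree (not part of the driver) for a failed read,
 * as implemented above:
 *
 *	writable array, !FailFast  -> freeze, fix_read_error(), thaw
 *	writable array,  FailFast  -> md_error() the device immediately
 *	read-only array            -> mark the slot IO_BLOCKED
 *
 * In every case the request is then retried through raid1_read_request()
 * with the same r1_bio, so IO_BLOCKED markings are honoured on retry.
 */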
25034ed8731dSShaohua Li static void raid1d(struct md_thread *thread)
25041da177e4SLinus Torvalds {
25054ed8731dSShaohua Li struct mddev *mddev = thread->mddev;
25069f2c9d12SNeilBrown struct r1bio *r1_bio;
25071da177e4SLinus Torvalds unsigned long flags;
2508e8096360SNeilBrown struct r1conf *conf = mddev->private;
25091da177e4SLinus Torvalds struct list_head *head = &conf->retry_list;
2510e1dfa0a2SNeilBrown struct blk_plug plug;
2511fd76863eScolyli@suse.de int idx;
25121da177e4SLinus Torvalds 
25131da177e4SLinus Torvalds md_check_recovery(mddev);
25141da177e4SLinus Torvalds 
251555ce74d4SNeilBrown if (!list_empty_careful(&conf->bio_end_io_list) &&
25162953079cSShaohua Li !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
251755ce74d4SNeilBrown LIST_HEAD(tmp);
251855ce74d4SNeilBrown spin_lock_irqsave(&conf->device_lock, flags);
2519fd76863eScolyli@suse.de if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2520fd76863eScolyli@suse.de list_splice_init(&conf->bio_end_io_list, &tmp);
252155ce74d4SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags);
252255ce74d4SNeilBrown while (!list_empty(&tmp)) {
2523a452744bSMikulas Patocka r1_bio = list_first_entry(&tmp, struct r1bio,
2524a452744bSMikulas Patocka retry_list);
252555ce74d4SNeilBrown list_del(&r1_bio->retry_list);
2526fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector);
2527824e47daScolyli@suse.de atomic_dec(&conf->nr_queued[idx]);
2528bd8688a1SNeilBrown if (mddev->degraded)
2529bd8688a1SNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state);
2530bd8688a1SNeilBrown if (test_bit(R1BIO_WriteError, &r1_bio->state))
2531bd8688a1SNeilBrown close_write(r1_bio);
253255ce74d4SNeilBrown raid_end_bio_io(r1_bio);
253355ce74d4SNeilBrown }
253455ce74d4SNeilBrown }
253555ce74d4SNeilBrown 
2536e1dfa0a2SNeilBrown blk_start_plug(&plug);
25371da177e4SLinus Torvalds for (;;) {
2538a35e63efSNeilBrown 
25397eaceaccSJens Axboe flush_pending_writes(conf);
2540a35e63efSNeilBrown 
25411da177e4SLinus Torvalds spin_lock_irqsave(&conf->device_lock, flags);
2542a35e63efSNeilBrown if (list_empty(head)) {
2543191ea9b2SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags);
25441da177e4SLinus Torvalds break;
2545a35e63efSNeilBrown }
25469f2c9d12SNeilBrown r1_bio = list_entry(head->prev, struct r1bio, retry_list);
25471da177e4SLinus Torvalds list_del(head->prev);
2548fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector);
2549824e47daScolyli@suse.de atomic_dec(&conf->nr_queued[idx]);
25501da177e4SLinus Torvalds spin_unlock_irqrestore(&conf->device_lock, flags);
25511da177e4SLinus Torvalds 
25521da177e4SLinus Torvalds mddev = r1_bio->mddev;
2553070ec55dSNeilBrown conf = mddev->private;
25544367af55SNeilBrown if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2555d8f05d29SNeilBrown if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
255662096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state))
255762096bceSNeilBrown handle_sync_write_finished(conf, r1_bio);
255862096bceSNeilBrown else
25591da177e4SLinus Torvalds sync_request_write(mddev, r1_bio);
2560cd5ff9a1SNeilBrown } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
256162096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state))
256262096bceSNeilBrown handle_write_finished(conf, r1_bio);
256362096bceSNeilBrown else if (test_bit(R1BIO_ReadError, &r1_bio->state))
256462096bceSNeilBrown handle_read_error(conf, r1_bio);
2565d2eb35acSNeilBrown else
2566c230e7e5SNeilBrown WARN_ON_ONCE(1);
256762096bceSNeilBrown 
25681d9d5241SNeilBrown cond_resched();
25692953079cSShaohua Li if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2570de393cdeSNeilBrown md_check_recovery(mddev);
25711da177e4SLinus Torvalds }
2572e1dfa0a2SNeilBrown blk_finish_plug(&plug);
25731da177e4SLinus Torvalds }
25741da177e4SLinus Torvalds 
2575e8096360SNeilBrown static int init_resync(struct r1conf *conf)
25761da177e4SLinus Torvalds {
25771da177e4SLinus Torvalds int buffs;
25781da177e4SLinus Torvalds 
25791da177e4SLinus Torvalds buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2580afeee514SKent Overstreet BUG_ON(mempool_initialized(&conf->r1buf_pool));
2581afeee514SKent Overstreet 
2582afeee514SKent Overstreet return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2583afeee514SKent Overstreet r1buf_pool_free, conf->poolinfo);
25841da177e4SLinus Torvalds }
25851da177e4SLinus Torvalds 
2586208410b5SShaohua Li static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2587208410b5SShaohua Li {
2588afeee514SKent Overstreet struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2589208410b5SShaohua Li struct resync_pages *rps;
2590208410b5SShaohua Li struct bio *bio;
2591208410b5SShaohua Li int i;
2592208410b5SShaohua Li 
2593208410b5SShaohua Li for (i = conf->poolinfo->raid_disks; i--; ) {
2594208410b5SShaohua Li bio = r1bio->bios[i];
2595208410b5SShaohua Li rps = bio->bi_private;
2596208410b5SShaohua Li bio_reset(bio);
2597208410b5SShaohua Li bio->bi_private = rps;
2598208410b5SShaohua Li }
2599208410b5SShaohua Li r1bio->master_bio = NULL;
2600208410b5SShaohua Li return r1bio;
2601208410b5SShaohua Li }
2602208410b5SShaohua Li 
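/*
 * Illustrative sketch (not part of the driver): bio_reset() clears the
 * whole bio, including ->bi_private, so the recycle loop above must save
 * and restore the resync_pages pointer by hand:
 *
 *	rps = bio->bi_private;	// owned by the r1buf pool
 *	bio_reset(bio);		// wipes bi_private among other fields
 *	bio->bi_private = rps;	// re-attach the page bookkeeping
 *
 * This keeps the pages allocated by r1buf_pool_alloc() usable across
 * repeated resync requests without reallocating them each time.
 */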
26031da177e4SLinus Torvalds /*
26041da177e4SLinus Torvalds  * perform a "sync" on one "block"
26051da177e4SLinus Torvalds  *
26061da177e4SLinus Torvalds  * We need to make sure that no normal I/O request - particularly write
26071da177e4SLinus Torvalds  * requests - conflict with active sync requests.
26081da177e4SLinus Torvalds  *
26091da177e4SLinus Torvalds  * This is achieved by tracking pending requests and a 'barrier' concept
26101da177e4SLinus Torvalds  * that can be installed to exclude normal IO requests.
26111da177e4SLinus Torvalds  */
26121da177e4SLinus Torvalds 
2613849674e4SShaohua Li static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2614849674e4SShaohua Li int *skipped)
26151da177e4SLinus Torvalds {
2616e8096360SNeilBrown struct r1conf *conf = mddev->private;
26179f2c9d12SNeilBrown struct r1bio *r1_bio;
26181da177e4SLinus Torvalds struct bio *bio;
26191da177e4SLinus Torvalds sector_t max_sector, nr_sectors;
26203e198f78SNeilBrown int disk = -1;
26211da177e4SLinus Torvalds int i;
26223e198f78SNeilBrown int wonly = -1;
26233e198f78SNeilBrown int write_targets = 0, read_targets = 0;
262457dab0bdSNeilBrown sector_t sync_blocks;
2625e3b9703eSNeilBrown int still_degraded = 0;
262606f60385SNeilBrown int good_sectors = RESYNC_SECTORS;
262706f60385SNeilBrown int min_bad = 0; /* number of sectors that are bad in all devices */
2628fd76863eScolyli@suse.de int idx = sector_to_idx(sector_nr);
2629022e510fSMing Lei int page_idx = 0;
26301da177e4SLinus Torvalds 
2631afeee514SKent Overstreet if (!mempool_initialized(&conf->r1buf_pool))
26321da177e4SLinus Torvalds if (init_resync(conf))
263357afd89fSNeilBrown return 0;
26341da177e4SLinus Torvalds 
263558c0fed4SAndre Noll max_sector = mddev->dev_sectors;
26361da177e4SLinus Torvalds if (sector_nr >= max_sector) {
2637191ea9b2SNeilBrown /* If we aborted, we need to abort the
2638191ea9b2SNeilBrown  * sync on the 'current' bitmap chunk (there will
2639191ea9b2SNeilBrown  * only be one in raid1 resync).
2640191ea9b2SNeilBrown  * We can find the current address in mddev->curr_resync
2641191ea9b2SNeilBrown  */
26426a806c51SNeilBrown if (mddev->curr_resync < max_sector) /* aborted */
2643e64e4018SAndy Shevchenko md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2644191ea9b2SNeilBrown &sync_blocks, 1);
26456a806c51SNeilBrown else /* completed sync */
2646191ea9b2SNeilBrown conf->fullsync = 0;
26476a806c51SNeilBrown 
2648e64e4018SAndy Shevchenko md_bitmap_close_sync(mddev->bitmap);
26491da177e4SLinus Torvalds close_sync(conf);
2650c40f341fSGoldwyn Rodrigues 
2651c40f341fSGoldwyn Rodrigues if (mddev_is_clustered(mddev)) {
2652c40f341fSGoldwyn Rodrigues conf->cluster_sync_low = 0;
2653c40f341fSGoldwyn Rodrigues conf->cluster_sync_high = 0;
2654c40f341fSGoldwyn Rodrigues }
26551da177e4SLinus Torvalds return 0;
26561da177e4SLinus Torvalds }
26571da177e4SLinus Torvalds 
265807d84d10SNeilBrown if (mddev->bitmap == NULL &&
265907d84d10SNeilBrown mddev->recovery_cp == MaxSector &&
26606394cca5SNeilBrown !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
266107d84d10SNeilBrown conf->fullsync == 0) {
266207d84d10SNeilBrown *skipped = 1;
266307d84d10SNeilBrown return max_sector - sector_nr;
266407d84d10SNeilBrown }
26656394cca5SNeilBrown /* before building a request, check if we can skip these blocks..
26666394cca5SNeilBrown  * This call to md_bitmap_start_sync doesn't actually record anything
26676394cca5SNeilBrown  */
2668e64e4018SAndy Shevchenko if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2669e5de485fSNeilBrown !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2670191ea9b2SNeilBrown /* We can skip this block, and probably several more */
2671191ea9b2SNeilBrown *skipped = 1;
2672191ea9b2SNeilBrown return sync_blocks;
2673191ea9b2SNeilBrown }
267417999be4SNeilBrown 
26757ac50447STomasz Majchrzak /*
26767ac50447STomasz Majchrzak  * If there is non-resync activity waiting for a turn, then let it
26777ac50447STomasz Majchrzak  * through before starting on this new sync request.
26787ac50447STomasz Majchrzak  */
2679824e47daScolyli@suse.de if (atomic_read(&conf->nr_waiting[idx]))
26807ac50447STomasz Majchrzak schedule_timeout_uninterruptible(1);
26817ac50447STomasz Majchrzak 
2682c40f341fSGoldwyn Rodrigues /* we are incrementing sector_nr below. To be safe, we check against
2683c40f341fSGoldwyn Rodrigues  * sector_nr + two times RESYNC_SECTORS
2684c40f341fSGoldwyn Rodrigues  */
2685c40f341fSGoldwyn Rodrigues 
2686e64e4018SAndy Shevchenko md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2687c40f341fSGoldwyn Rodrigues mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
268817999be4SNeilBrown 
26898c242593SYufen Yu 
26908c242593SYufen Yu if (raise_barrier(conf, sector_nr))
26918c242593SYufen Yu return 0;
26928c242593SYufen Yu 
26938c242593SYufen Yu r1_bio = raid1_alloc_init_r1buf(conf);
26941da177e4SLinus Torvalds 
26953e198f78SNeilBrown rcu_read_lock();
26963e198f78SNeilBrown /*
26973e198f78SNeilBrown  * If we get a correctable read error during resync or recovery,
26983e198f78SNeilBrown  * we might want to read from a different device.  So we
26993e198f78SNeilBrown  * flag all drives that could conceivably be read from for READ,
27003e198f78SNeilBrown  * and any others (which will be non-In_sync devices) for WRITE.
27013e198f78SNeilBrown  * If a read fails, we try reading from something else for which READ
27023e198f78SNeilBrown  * is OK.
27033e198f78SNeilBrown  */
27041da177e4SLinus Torvalds 
27051da177e4SLinus Torvalds r1_bio->mddev = mddev;
27061da177e4SLinus Torvalds r1_bio->sector = sector_nr;
2707191ea9b2SNeilBrown r1_bio->state = 0;
27081da177e4SLinus Torvalds set_bit(R1BIO_IsSync, &r1_bio->state);
2709fd76863eScolyli@suse.de /* make sure good_sectors won't go across barrier unit boundary */
2710fd76863eScolyli@suse.de good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
27111da177e4SLinus Torvalds 
27128f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) {
27133cb03002SNeilBrown struct md_rdev *rdev;
27141da177e4SLinus Torvalds bio = r1_bio->bios[i];
27151da177e4SLinus Torvalds 
27163e198f78SNeilBrown rdev = rcu_dereference(conf->mirrors[i].rdev);
27173e198f78SNeilBrown if (rdev == NULL ||
27183e198f78SNeilBrown test_bit(Faulty, &rdev->flags)) {
27198f19ccb2SNeilBrown if (i < conf->raid_disks)
2720e3b9703eSNeilBrown still_degraded = 1;
27213e198f78SNeilBrown } else if (!test_bit(In_sync, &rdev->flags)) {
2722796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
27231da177e4SLinus Torvalds bio->bi_end_io = end_sync_write;
27241da177e4SLinus Torvalds write_targets ++;
27253e198f78SNeilBrown } else {
27263e198f78SNeilBrown /* may need to read from here */
272706f60385SNeilBrown sector_t first_bad = MaxSector;
272806f60385SNeilBrown int bad_sectors;
272906f60385SNeilBrown 
273006f60385SNeilBrown if (is_badblock(rdev, sector_nr, good_sectors,
273106f60385SNeilBrown &first_bad, &bad_sectors)) {
273206f60385SNeilBrown if (first_bad > sector_nr)
273306f60385SNeilBrown good_sectors = first_bad - sector_nr;
273406f60385SNeilBrown else {
273506f60385SNeilBrown bad_sectors -= (sector_nr - first_bad);
273606f60385SNeilBrown if (min_bad == 0 ||
273706f60385SNeilBrown min_bad > bad_sectors)
273806f60385SNeilBrown min_bad = bad_sectors;
273906f60385SNeilBrown }
274006f60385SNeilBrown }
274106f60385SNeilBrown if (sector_nr < first_bad) {
27423e198f78SNeilBrown if (test_bit(WriteMostly, &rdev->flags)) {
27433e198f78SNeilBrown if (wonly < 0)
27443e198f78SNeilBrown wonly = i;
27453e198f78SNeilBrown } else {
27463e198f78SNeilBrown if (disk < 0)
27473e198f78SNeilBrown disk = i;
27483e198f78SNeilBrown }
2749796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_READ, 0);
275006f60385SNeilBrown bio->bi_end_io = end_sync_read;
27513e198f78SNeilBrown read_targets++;
2752d57368afSAlexander Lyakas } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2753d57368afSAlexander Lyakas test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2754d57368afSAlexander Lyakas !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2755d57368afSAlexander Lyakas /*
2756d57368afSAlexander Lyakas  * The device is suitable for reading (InSync),
2757d57368afSAlexander Lyakas  * but has bad block(s) here. Let's try to correct them,
2758d57368afSAlexander Lyakas  * if we are doing resync or repair. Otherwise, leave
2759d57368afSAlexander Lyakas  * this device alone for this sync request.
2760d57368afSAlexander Lyakas  */
2761796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2762d57368afSAlexander Lyakas bio->bi_end_io = end_sync_write;
2763d57368afSAlexander Lyakas write_targets++;
27643e198f78SNeilBrown }
276506f60385SNeilBrown }
2766028288dfSZhiqiang Liu if (rdev && bio->bi_end_io) {
27673e198f78SNeilBrown atomic_inc(&rdev->nr_pending);
27684f024f37SKent Overstreet bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
276974d46992SChristoph Hellwig bio_set_dev(bio, rdev->bdev);
27702e52d449SNeilBrown if (test_bit(FailFast, &rdev->flags))
27712e52d449SNeilBrown bio->bi_opf |= MD_FAILFAST;
27721da177e4SLinus Torvalds }
277306f60385SNeilBrown }
27743e198f78SNeilBrown rcu_read_unlock();
27753e198f78SNeilBrown if (disk < 0)
27763e198f78SNeilBrown disk = wonly;
27773e198f78SNeilBrown r1_bio->read_disk = disk;
2778191ea9b2SNeilBrown 
277906f60385SNeilBrown if (read_targets == 0 && min_bad > 0) {
278006f60385SNeilBrown /* These sectors are bad on all InSync devices, so we
278106f60385SNeilBrown  * need to mark them bad on all write targets
278206f60385SNeilBrown  */
278306f60385SNeilBrown int ok = 1;
27848f19ccb2SNeilBrown for (i = 0 ; i < conf->raid_disks * 2 ; i++)
278506f60385SNeilBrown if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2786a42f9d83Smajianpeng struct md_rdev *rdev = conf->mirrors[i].rdev;
278706f60385SNeilBrown ok = rdev_set_badblocks(rdev, sector_nr,
278806f60385SNeilBrown min_bad, 0
278906f60385SNeilBrown ) && ok;
279006f60385SNeilBrown }
27912953079cSShaohua Li set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
279206f60385SNeilBrown *skipped = 1;
279306f60385SNeilBrown put_buf(r1_bio);
279406f60385SNeilBrown 
279506f60385SNeilBrown if (!ok) {
279606f60385SNeilBrown /* Cannot record the badblocks, so need to
279706f60385SNeilBrown  * abort the resync.
279806f60385SNeilBrown  * If there are multiple read targets, could just
279906f60385SNeilBrown  * fail the really bad ones ???
280006f60385SNeilBrown  */
280106f60385SNeilBrown conf->recovery_disabled = mddev->recovery_disabled;
280206f60385SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery);
280306f60385SNeilBrown return 0;
280406f60385SNeilBrown } else
280506f60385SNeilBrown return min_bad;
280606f60385SNeilBrown 
280706f60385SNeilBrown }
280806f60385SNeilBrown if (min_bad > 0 && min_bad < good_sectors) {
280906f60385SNeilBrown /* only resync enough to reach the next bad->good
281006f60385SNeilBrown  * transition */
281106f60385SNeilBrown good_sectors = min_bad;
281206f60385SNeilBrown }
281306f60385SNeilBrown 
28143e198f78SNeilBrown if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
28153e198f78SNeilBrown /* extra read targets are also write targets */
28163e198f78SNeilBrown write_targets += read_targets-1;
28173e198f78SNeilBrown 
28183e198f78SNeilBrown if (write_targets == 0 || read_targets == 0) {
28191da177e4SLinus Torvalds /* There is nowhere to write, so all non-sync
28201da177e4SLinus Torvalds  * drives must be failed - so we are finished
28211da177e4SLinus Torvalds  */
2822b7219ccbSNeilBrown sector_t rv;
2823b7219ccbSNeilBrown if (min_bad > 0)
2824b7219ccbSNeilBrown max_sector = sector_nr + min_bad;
2825b7219ccbSNeilBrown rv = max_sector - sector_nr;
282657afd89fSNeilBrown *skipped = 1;
28271da177e4SLinus Torvalds put_buf(r1_bio);
28281da177e4SLinus Torvalds return rv;
28291da177e4SLinus Torvalds }
28301da177e4SLinus Torvalds 
2831c6207277SNeilBrown if (max_sector > mddev->resync_max)
2832c6207277SNeilBrown max_sector = mddev->resync_max; /* Don't do IO beyond here */
283306f60385SNeilBrown if (max_sector > sector_nr + good_sectors)
283406f60385SNeilBrown max_sector = sector_nr + good_sectors;
28351da177e4SLinus Torvalds nr_sectors = 0;
2836289e99e8SNeilBrown sync_blocks = 0;
28371da177e4SLinus Torvalds do {
28381da177e4SLinus Torvalds struct page *page;
28391da177e4SLinus Torvalds int len = PAGE_SIZE;
28401da177e4SLinus Torvalds if (sector_nr + (len>>9) > max_sector)
28411da177e4SLinus Torvalds len = (max_sector - sector_nr) << 9;
28421da177e4SLinus Torvalds if (len == 0)
28431da177e4SLinus Torvalds break;
2844ab7a30c7SNeilBrown if (sync_blocks == 0) {
2845e64e4018SAndy Shevchenko if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2846e3b9703eSNeilBrown &sync_blocks, still_degraded) &&
2847e5de485fSNeilBrown !conf->fullsync &&
2848e5de485fSNeilBrown !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2849191ea9b2SNeilBrown break;
28507571ae88SNeilBrown if ((len >> 9) > sync_blocks)
28516a806c51SNeilBrown len = sync_blocks<<9;
2852ab7a30c7SNeilBrown }
2853191ea9b2SNeilBrown 
28548f19ccb2SNeilBrown for (i = 0 ; i < conf->raid_disks * 2; i++) {
285598d30c58SMing Lei struct resync_pages *rp;
285698d30c58SMing Lei 
28571da177e4SLinus Torvalds bio = r1_bio->bios[i];
285898d30c58SMing Lei rp = get_resync_pages(bio);
28591da177e4SLinus Torvalds if (bio->bi_end_io) {
2860022e510fSMing Lei page = resync_fetch_page(rp, page_idx);
2861c85ba149SMing Lei 
2862c85ba149SMing Lei /*
2863c85ba149SMing Lei  * won't fail because the vec table is big
2864c85ba149SMing Lei  * enough to hold all these pages
2865c85ba149SMing Lei  */
2866c85ba149SMing Lei bio_add_page(bio, page, len, 0);
28671da177e4SLinus Torvalds }
28681da177e4SLinus Torvalds }
28691da177e4SLinus Torvalds nr_sectors += len>>9;
28701da177e4SLinus Torvalds sector_nr += len>>9;
2871191ea9b2SNeilBrown sync_blocks -= (len>>9);
2872022e510fSMing Lei } while (++page_idx < RESYNC_PAGES);
287398d30c58SMing Lei 
28741da177e4SLinus Torvalds r1_bio->sectors = nr_sectors;
28751da177e4SLinus Torvalds 
2876c40f341fSGoldwyn Rodrigues if (mddev_is_clustered(mddev) &&
2877c40f341fSGoldwyn Rodrigues conf->cluster_sync_high < sector_nr + nr_sectors) {
2878c40f341fSGoldwyn Rodrigues conf->cluster_sync_low = mddev->curr_resync_completed;
2879c40f341fSGoldwyn Rodrigues conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2880c40f341fSGoldwyn Rodrigues /* Send resync message */
2881c40f341fSGoldwyn Rodrigues md_cluster_ops->resync_info_update(mddev,
2882c40f341fSGoldwyn Rodrigues conf->cluster_sync_low,
2883c40f341fSGoldwyn Rodrigues conf->cluster_sync_high);
2884c40f341fSGoldwyn Rodrigues }
2885c40f341fSGoldwyn Rodrigues 
2886d11c171eSNeilBrown /* For a user-requested sync, we read all readable devices and do a
2887d11c171eSNeilBrown  * compare
2888d11c171eSNeilBrown  */
2889d11c171eSNeilBrown if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2890d11c171eSNeilBrown atomic_set(&r1_bio->remaining, read_targets);
28912d4f4f33SNeilBrown for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2892d11c171eSNeilBrown bio = r1_bio->bios[i];
2893d11c171eSNeilBrown if (bio->bi_end_io == end_sync_read) {
28942d4f4f33SNeilBrown read_targets--;
289574d46992SChristoph Hellwig md_sync_acct_bio(bio, nr_sectors);
28962e52d449SNeilBrown if (read_targets == 1)
28972e52d449SNeilBrown bio->bi_opf &= ~MD_FAILFAST;
2898ed00aabdSChristoph Hellwig submit_bio_noacct(bio);
2899d11c171eSNeilBrown }
2900d11c171eSNeilBrown }
2901d11c171eSNeilBrown } else {
2902d11c171eSNeilBrown atomic_set(&r1_bio->remaining, 1);
2903d11c171eSNeilBrown bio = r1_bio->bios[r1_bio->read_disk];
290474d46992SChristoph Hellwig md_sync_acct_bio(bio, nr_sectors);
29052e52d449SNeilBrown if (read_targets == 1)
29062e52d449SNeilBrown bio->bi_opf &= ~MD_FAILFAST;
2907ed00aabdSChristoph Hellwig submit_bio_noacct(bio);
2908d11c171eSNeilBrown }
29091da177e4SLinus Torvalds return nr_sectors;
29101da177e4SLinus Torvalds }
29111da177e4SLinus Torvalds 
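/*
 * Illustrative note (not part of the driver): MD_FAILFAST is stripped
 * when only one read target remains, since a fast-fail on the last
 * readable copy would abort a resync that might still succeed after
 * normal block-layer retries.  The page loop above builds each bio in
 * page-sized steps; assuming 4K pages, each of the RESYNC_PAGES
 * iterations adds at most PAGE_SIZE >> 9 = 8 sectors, and 'len' is cut
 * short by sync_blocks so a request never runs past the bitmap region
 * that md_bitmap_start_sync() said needs syncing.
 */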
2912fd01b88cSNeilBrown static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
291380c3a6ceSDan Williams {
291480c3a6ceSDan Williams if (sectors)
291580c3a6ceSDan Williams return sectors;
291680c3a6ceSDan Williams 
291780c3a6ceSDan Williams return mddev->dev_sectors;
291880c3a6ceSDan Williams }
291980c3a6ceSDan Williams 
2920e8096360SNeilBrown static struct r1conf *setup_conf(struct mddev *mddev)
29211da177e4SLinus Torvalds {
2922e8096360SNeilBrown struct r1conf *conf;
2923709ae487SNeilBrown int i;
29240eaf822cSJonathan Brassow struct raid1_info *disk;
29253cb03002SNeilBrown struct md_rdev *rdev;
2926709ae487SNeilBrown int err = -ENOMEM;
29271da177e4SLinus Torvalds 
2928e8096360SNeilBrown conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
29291da177e4SLinus Torvalds if (!conf)
2930709ae487SNeilBrown goto abort;
29311da177e4SLinus Torvalds 
2932fd76863eScolyli@suse.de conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2933824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL);
2934fd76863eScolyli@suse.de if (!conf->nr_pending)
2935fd76863eScolyli@suse.de goto abort;
2936fd76863eScolyli@suse.de 
2937fd76863eScolyli@suse.de conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2938824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL);
2939fd76863eScolyli@suse.de if (!conf->nr_waiting)
2940fd76863eScolyli@suse.de goto abort;
2941fd76863eScolyli@suse.de 
2942fd76863eScolyli@suse.de conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2943824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL);
2944fd76863eScolyli@suse.de if (!conf->nr_queued)
2945fd76863eScolyli@suse.de goto abort;
2946fd76863eScolyli@suse.de 
2947fd76863eScolyli@suse.de conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2948824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL);
2949fd76863eScolyli@suse.de if (!conf->barrier)
2950fd76863eScolyli@suse.de goto abort;
2951fd76863eScolyli@suse.de 
29526396bb22SKees Cook conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
29536396bb22SKees Cook mddev->raid_disks, 2),
29541da177e4SLinus Torvalds GFP_KERNEL);
29551da177e4SLinus Torvalds if (!conf->mirrors)
2956709ae487SNeilBrown goto abort;
29571da177e4SLinus Torvalds 
2958ddaf22abSNeilBrown conf->tmppage = alloc_page(GFP_KERNEL);
2959ddaf22abSNeilBrown if (!conf->tmppage)
2960709ae487SNeilBrown goto abort;
2961ddaf22abSNeilBrown 
2962709ae487SNeilBrown conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
29631da177e4SLinus Torvalds if (!conf->poolinfo)
2964709ae487SNeilBrown goto abort;
29658f19ccb2SNeilBrown conf->poolinfo->raid_disks = mddev->raid_disks * 2;
29663f677f9cSMarcos Paulo de Souza err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2967c7afa803SMarcos Paulo de Souza rbio_pool_free, conf->poolinfo);
2968afeee514SKent Overstreet if (err)
2969709ae487SNeilBrown goto abort;
2970709ae487SNeilBrown 
2971afeee514SKent Overstreet err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2972afeee514SKent Overstreet if (err)
2973c230e7e5SNeilBrown goto abort;
2974c230e7e5SNeilBrown 
2975ed9bfdf1SNeilBrown conf->poolinfo->mddev = mddev;
29761da177e4SLinus Torvalds 
2977c19d5798SNeilBrown err = -EINVAL;
2978e7e72bf6SNeil Brown spin_lock_init(&conf->device_lock);
2979dafb20faSNeilBrown rdev_for_each(rdev, mddev) {
2980709ae487SNeilBrown int disk_idx = rdev->raid_disk;
29811da177e4SLinus Torvalds if (disk_idx >= mddev->raid_disks
29821da177e4SLinus Torvalds || disk_idx < 0)
29831da177e4SLinus Torvalds continue;
2984c19d5798SNeilBrown if (test_bit(Replacement, &rdev->flags))
298502b898f2SNeilBrown disk = conf->mirrors + mddev->raid_disks + disk_idx;
2986c19d5798SNeilBrown else
29871da177e4SLinus Torvalds disk = conf->mirrors + disk_idx;
29881da177e4SLinus Torvalds 
2989c19d5798SNeilBrown if (disk->rdev)
2990c19d5798SNeilBrown goto abort;
29911da177e4SLinus Torvalds disk->rdev = rdev;
29921da177e4SLinus Torvalds disk->head_position = 0;
299312cee5a8SShaohua Li disk->seq_start = MaxSector;
29941da177e4SLinus Torvalds }
29951da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks;
29961da177e4SLinus Torvalds conf->mddev = mddev;
29971da177e4SLinus Torvalds INIT_LIST_HEAD(&conf->retry_list);
299855ce74d4SNeilBrown INIT_LIST_HEAD(&conf->bio_end_io_list);
29991da177e4SLinus Torvalds 
30001da177e4SLinus Torvalds spin_lock_init(&conf->resync_lock);
300117999be4SNeilBrown init_waitqueue_head(&conf->wait_barrier);
30021da177e4SLinus Torvalds 
3003191ea9b2SNeilBrown bio_list_init(&conf->pending_bio_list);
300434db0cd6SNeilBrown conf->pending_count = 0;
3005d890fa2bSNeilBrown conf->recovery_disabled = mddev->recovery_disabled - 1;
3006191ea9b2SNeilBrown 
3007c19d5798SNeilBrown err = -EIO;
30088f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) {
30091da177e4SLinus Torvalds 
30101da177e4SLinus Torvalds disk = conf->mirrors + i;
30111da177e4SLinus Torvalds 
3012c19d5798SNeilBrown if (i < conf->raid_disks &&
3013c19d5798SNeilBrown disk[conf->raid_disks].rdev) {
3014c19d5798SNeilBrown /* This slot has a replacement. */
3015c19d5798SNeilBrown if (!disk->rdev) {
3016c19d5798SNeilBrown /* No original, just make the replacement
3017c19d5798SNeilBrown  * a recovering spare
3018c19d5798SNeilBrown  */
3019c19d5798SNeilBrown disk->rdev =
3020c19d5798SNeilBrown disk[conf->raid_disks].rdev;
3021c19d5798SNeilBrown disk[conf->raid_disks].rdev = NULL;
3022c19d5798SNeilBrown } else if (!test_bit(In_sync, &disk->rdev->flags))
3023c19d5798SNeilBrown /* Original is not in_sync - bad */
3024c19d5798SNeilBrown goto abort;
3025c19d5798SNeilBrown }
3026c19d5798SNeilBrown 
30275fd6c1dcSNeilBrown if (!disk->rdev ||
30285fd6c1dcSNeilBrown !test_bit(In_sync, &disk->rdev->flags)) {
30291da177e4SLinus Torvalds disk->head_position = 0;
30304f0a5e01SJonathan Brassow if (disk->rdev &&
30314f0a5e01SJonathan Brassow (disk->rdev->saved_raid_disk < 0))
303217571284SNeilBrown conf->fullsync = 1;
3033be4d3280SShaohua Li }
30341da177e4SLinus Torvalds }
3035709ae487SNeilBrown 
3036709ae487SNeilBrown err = -ENOMEM;
30370232605dSNeilBrown conf->thread = md_register_thread(raid1d, mddev, "raid1");
30381d41c216SNeilBrown if (!conf->thread)
3039709ae487SNeilBrown goto abort;
3040191ea9b2SNeilBrown 
3041709ae487SNeilBrown return conf;
3042709ae487SNeilBrown 
3043709ae487SNeilBrown  abort:
3044709ae487SNeilBrown if (conf) {
3045afeee514SKent Overstreet mempool_exit(&conf->r1bio_pool);
3046709ae487SNeilBrown kfree(conf->mirrors);
3047709ae487SNeilBrown safe_put_page(conf->tmppage);
3048709ae487SNeilBrown kfree(conf->poolinfo);
3049fd76863eScolyli@suse.de kfree(conf->nr_pending);
3050fd76863eScolyli@suse.de kfree(conf->nr_waiting);
3051fd76863eScolyli@suse.de kfree(conf->nr_queued);
3052fd76863eScolyli@suse.de kfree(conf->barrier);
3053afeee514SKent Overstreet bioset_exit(&conf->bio_split);
3054709ae487SNeilBrown kfree(conf);
3055709ae487SNeilBrown }
3056709ae487SNeilBrown return ERR_PTR(err);
3057709ae487SNeilBrown }
3058709ae487SNeilBrown 
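/*
 * Illustrative layout (not part of the driver): conf->mirrors holds
 * raid_disks * 2 slots; slot i is the primary device for role i and
 * slot raid_disks + i is its optional replacement.  For a 2-disk array:
 *
 *	mirrors[0]  mirrors[1]	<- active roles 0 and 1
 *	mirrors[2]  mirrors[3]	<- replacements for roles 0 and 1
 *
 * setup_conf() exploits this in the loop above: a replacement with no
 * surviving original is promoted into the primary slot as a recovering
 * spare, while a replacement whose original is not In_sync is rejected.
 */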
3059afa0f557SNeilBrown static void raid1_free(struct mddev *mddev, void *priv);
3060849674e4SShaohua Li static int raid1_run(struct mddev *mddev)
3061709ae487SNeilBrown {
3062e8096360SNeilBrown struct r1conf *conf;
3063709ae487SNeilBrown int i;
30643cb03002SNeilBrown struct md_rdev *rdev;
30655220ea1eSmajianpeng int ret;
30662ff8cc2cSShaohua Li bool discard_supported = false;
3067709ae487SNeilBrown 
3068709ae487SNeilBrown if (mddev->level != 1) {
30691d41c216SNeilBrown pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3070709ae487SNeilBrown mdname(mddev), mddev->level);
3071709ae487SNeilBrown return -EIO;
3072709ae487SNeilBrown }
3073709ae487SNeilBrown if (mddev->reshape_position != MaxSector) {
30741d41c216SNeilBrown pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3075709ae487SNeilBrown mdname(mddev));
3076709ae487SNeilBrown return -EIO;
3077709ae487SNeilBrown }
3078a415c0f1SNeilBrown if (mddev_init_writes_pending(mddev) < 0)
3079a415c0f1SNeilBrown return -ENOMEM;
3080709ae487SNeilBrown /*
3081709ae487SNeilBrown  * copy the already verified devices into our private RAID1
3082709ae487SNeilBrown  * bookkeeping area. [whatever we allocate in run(),
3083afa0f557SNeilBrown  * should be freed in raid1_free()]
3084709ae487SNeilBrown  */
3085709ae487SNeilBrown if (mddev->private == NULL)
3086709ae487SNeilBrown conf = setup_conf(mddev);
3087709ae487SNeilBrown else
3088709ae487SNeilBrown conf = mddev->private;
3089709ae487SNeilBrown 
3090709ae487SNeilBrown if (IS_ERR(conf))
3091709ae487SNeilBrown return PTR_ERR(conf);
3092709ae487SNeilBrown 
30933deff1a7SChristoph Hellwig if (mddev->queue) {
30945026d7a9SH. Peter Anvin blk_queue_max_write_same_sectors(mddev->queue, 0);
30953deff1a7SChristoph Hellwig blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
30963deff1a7SChristoph Hellwig }
30975026d7a9SH. Peter Anvin 
3098dafb20faSNeilBrown rdev_for_each(rdev, mddev) {
30991ed7242eSJonathan Brassow if (!mddev->gendisk)
31001ed7242eSJonathan Brassow continue;
3101709ae487SNeilBrown disk_stack_limits(mddev->gendisk, rdev->bdev,
3102709ae487SNeilBrown rdev->data_offset << 9);
31032ff8cc2cSShaohua Li if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
31042ff8cc2cSShaohua Li discard_supported = true;
3105709ae487SNeilBrown }
3106709ae487SNeilBrown 
3107709ae487SNeilBrown mddev->degraded = 0;
3108709ae487SNeilBrown for (i = 0; i < conf->raid_disks; i++)
3109709ae487SNeilBrown if (conf->mirrors[i].rdev == NULL ||
3110709ae487SNeilBrown !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3111709ae487SNeilBrown test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3112709ae487SNeilBrown mddev->degraded++;
311307f1a685SYufen Yu /*
311407f1a685SYufen Yu  * RAID1 needs at least one active disk
311507f1a685SYufen Yu  */
311607f1a685SYufen Yu if (conf->raid_disks - mddev->degraded < 1) {
311707f1a685SYufen Yu ret = -EINVAL;
311807f1a685SYufen Yu goto abort;
311907f1a685SYufen Yu }
3120709ae487SNeilBrown 
3121709ae487SNeilBrown if (conf->raid_disks - mddev->degraded == 1)
3122709ae487SNeilBrown mddev->recovery_cp = MaxSector;
3123709ae487SNeilBrown 
31248c6ac868SAndre Noll if (mddev->recovery_cp != MaxSector)
31251d41c216SNeilBrown pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
31268c6ac868SAndre Noll mdname(mddev));
31271d41c216SNeilBrown pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
31281da177e4SLinus Torvalds mdname(mddev), mddev->raid_disks - mddev->degraded,
31291da177e4SLinus Torvalds mddev->raid_disks);
3130709ae487SNeilBrown 
31311da177e4SLinus Torvalds /*
31321da177e4SLinus Torvalds  * Ok, everything is just fine now
31331da177e4SLinus Torvalds  */
3134709ae487SNeilBrown mddev->thread = conf->thread;
3135709ae487SNeilBrown conf->thread = NULL;
3136709ae487SNeilBrown mddev->private = conf;
313746533ff7SNeilBrown set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3138709ae487SNeilBrown 
31391f403624SDan Williams md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
31401da177e4SLinus Torvalds 
31411ed7242eSJonathan Brassow if (mddev->queue) {
31422ff8cc2cSShaohua Li if (discard_supported)
31438b904b5bSBart Van Assche blk_queue_flag_set(QUEUE_FLAG_DISCARD,
31442ff8cc2cSShaohua Li mddev->queue);
31452ff8cc2cSShaohua Li else
31468b904b5bSBart Van Assche blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
31472ff8cc2cSShaohua Li mddev->queue);
31481ed7242eSJonathan Brassow }
31495220ea1eSmajianpeng 
31505220ea1eSmajianpeng ret = md_integrity_register(mddev);
31515aa61f42SNeilBrown if (ret) {
31525aa61f42SNeilBrown md_unregister_thread(&mddev->thread);
315307f1a685SYufen Yu goto abort;
31545aa61f42SNeilBrown }
315507f1a685SYufen Yu return 0;
315607f1a685SYufen Yu 
315707f1a685SYufen Yu abort:
315807f1a685SYufen Yu raid1_free(mddev, conf);
31595220ea1eSmajianpeng return ret;
31601da177e4SLinus Torvalds }
31611da177e4SLinus Torvalds 
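/*
 * Illustrative note (not part of the driver): raid1_run() recomputes
 * mddev->degraded by counting primary slots whose rdev is missing, not
 * In_sync, or Faulty.  A 2-disk mirror with one such slot still starts,
 * just degraded (1 of 2 mirrors); only when no active disk remains at
 * all does the check above refuse to run with -EINVAL.
 */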
3162afa0f557SNeilBrown static void raid1_free(struct mddev *mddev, void *priv)
31631da177e4SLinus Torvalds {
3164afa0f557SNeilBrown struct r1conf *conf = priv;
31654b6d287fSNeilBrown 
3166afeee514SKent Overstreet mempool_exit(&conf->r1bio_pool);
31671da177e4SLinus Torvalds kfree(conf->mirrors);
31680fea7ed8SHirokazu Takahashi safe_put_page(conf->tmppage);
31691da177e4SLinus Torvalds kfree(conf->poolinfo);
3170fd76863eScolyli@suse.de kfree(conf->nr_pending);
3171fd76863eScolyli@suse.de kfree(conf->nr_waiting);
3172fd76863eScolyli@suse.de kfree(conf->nr_queued);
3173fd76863eScolyli@suse.de kfree(conf->barrier);
3174afeee514SKent Overstreet bioset_exit(&conf->bio_split);
31751da177e4SLinus Torvalds kfree(conf);
31761da177e4SLinus Torvalds }
31771da177e4SLinus Torvalds 
3178fd01b88cSNeilBrown static int raid1_resize(struct mddev *mddev, sector_t sectors)
31791da177e4SLinus Torvalds {
31801da177e4SLinus Torvalds /* no resync is happening, and there is enough space
31811da177e4SLinus Torvalds  * on all devices, so we can resize.
31821da177e4SLinus Torvalds  * We need to make sure resync covers any new space.
31831da177e4SLinus Torvalds  * If the array is shrinking we should possibly wait until
31841da177e4SLinus Torvalds  * any io in the removed space completes, but it hardly seems
31851da177e4SLinus Torvalds  * worth it.
31861da177e4SLinus Torvalds  */
3187a4a6125aSNeilBrown sector_t newsize = raid1_size(mddev, sectors, 0);
3188a4a6125aSNeilBrown if (mddev->external_size &&
3189a4a6125aSNeilBrown mddev->array_sectors > newsize)
3190b522adcdSDan Williams return -EINVAL;
3191a4a6125aSNeilBrown if (mddev->bitmap) {
3192e64e4018SAndy Shevchenko int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3193a4a6125aSNeilBrown if (ret)
3194a4a6125aSNeilBrown return ret;
3195a4a6125aSNeilBrown }
3196a4a6125aSNeilBrown md_set_array_sectors(mddev, newsize);
3197b522adcdSDan Williams if (sectors > mddev->dev_sectors &&
3198b098636cSNeilBrown mddev->recovery_cp > mddev->dev_sectors) {
319958c0fed4SAndre Noll mddev->recovery_cp = mddev->dev_sectors;
32001da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
32011da177e4SLinus Torvalds }
3202b522adcdSDan Williams mddev->dev_sectors = sectors;
32034b5c7ae8SNeilBrown mddev->resync_max_sectors = sectors;
32041da177e4SLinus Torvalds return 0;
32051da177e4SLinus Torvalds }
32061da177e4SLinus Torvalds 
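/*
 * Illustrative note (not part of the driver): when raid1_resize() grows
 * the array (sectors > the old dev_sectors) and the recovery checkpoint
 * sat at or beyond the old end, recovery_cp is pulled back to the old
 * size and MD_RECOVERY_NEEDED is set, so only the newly exposed space is
 * resynced rather than the whole device.
 */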
3207fd01b88cSNeilBrown static int raid1_reshape(struct mddev *mddev)
32081da177e4SLinus Torvalds {
32091da177e4SLinus Torvalds /* We need to:
32101da177e4SLinus Torvalds  * 1/ resize the r1bio_pool
32111da177e4SLinus Torvalds  * 2/ resize conf->mirrors
32121da177e4SLinus Torvalds  *
32131da177e4SLinus Torvalds  * We allocate a new r1bio_pool if we can.
32141da177e4SLinus Torvalds  * Then raise a device barrier and wait until all IO stops.
32151da177e4SLinus Torvalds  * Then resize conf->mirrors and swap in the new r1bio pool.
32166ea9c07cSNeilBrown  *
32176ea9c07cSNeilBrown  * At the same time, we "pack" the devices so that all the missing
32186ea9c07cSNeilBrown  * devices have the higher raid_disk numbers.
32191da177e4SLinus Torvalds  */
3220afeee514SKent Overstreet mempool_t newpool, oldpool;
32211da177e4SLinus Torvalds struct pool_info *newpoolinfo;
32220eaf822cSJonathan Brassow struct raid1_info *newmirrors;
3223e8096360SNeilBrown struct r1conf *conf = mddev->private;
322463c70c4fSNeilBrown int cnt, raid_disks;
3225c04be0aaSNeilBrown unsigned long flags;
32262214c260SArtur Paszkiewicz int d, d2;
3227afeee514SKent Overstreet int ret;
3228afeee514SKent Overstreet 
3229afeee514SKent Overstreet memset(&newpool, 0, sizeof(newpool));
3230afeee514SKent Overstreet memset(&oldpool, 0, sizeof(oldpool));
32311da177e4SLinus Torvalds 
323263c70c4fSNeilBrown /* Cannot change chunk_size, layout, or level */
3233664e7c41SAndre Noll if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
323463c70c4fSNeilBrown mddev->layout != mddev->new_layout ||
323563c70c4fSNeilBrown mddev->level != mddev->new_level) {
3236664e7c41SAndre Noll mddev->new_chunk_sectors = mddev->chunk_sectors;
323763c70c4fSNeilBrown mddev->new_layout = mddev->layout;
323863c70c4fSNeilBrown mddev->new_level = mddev->level;
323963c70c4fSNeilBrown return -EINVAL;
324063c70c4fSNeilBrown }
324163c70c4fSNeilBrown 
32422214c260SArtur Paszkiewicz if (!mddev_is_clustered(mddev))
32432214c260SArtur Paszkiewicz md_allow_write(mddev);
32442a2275d6SNeilBrown 
324563c70c4fSNeilBrown raid_disks = mddev->raid_disks + mddev->delta_disks;
324663c70c4fSNeilBrown 
32476ea9c07cSNeilBrown if (raid_disks < conf->raid_disks) {
32486ea9c07cSNeilBrown cnt = 0;
32496ea9c07cSNeilBrown for (d = 0; d < conf->raid_disks; d++)
32501da177e4SLinus Torvalds if (conf->mirrors[d].rdev)
32516ea9c07cSNeilBrown cnt++;
32526ea9c07cSNeilBrown if (cnt > raid_disks)
32531da177e4SLinus Torvalds return -EBUSY;
32546ea9c07cSNeilBrown }
32551da177e4SLinus Torvalds 
32561da177e4SLinus Torvalds newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
32571da177e4SLinus Torvalds if (!newpoolinfo)
32581da177e4SLinus Torvalds return -ENOMEM;
32591da177e4SLinus Torvalds newpoolinfo->mddev = mddev;
32608f19ccb2SNeilBrown newpoolinfo->raid_disks = raid_disks * 2;
32611da177e4SLinus Torvalds 
32623f677f9cSMarcos Paulo de Souza ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3263c7afa803SMarcos Paulo de Souza rbio_pool_free, newpoolinfo);
3264afeee514SKent Overstreet if (ret) {
32651da177e4SLinus Torvalds kfree(newpoolinfo);
3266afeee514SKent Overstreet return ret;
32671da177e4SLinus Torvalds }
32686396bb22SKees Cook newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
32696396bb22SKees Cook raid_disks, 2),
32708f19ccb2SNeilBrown GFP_KERNEL);
32711da177e4SLinus Torvalds if (!newmirrors) {
32721da177e4SLinus Torvalds kfree(newpoolinfo);
3273afeee514SKent Overstreet mempool_exit(&newpool);
32741da177e4SLinus Torvalds return -ENOMEM;
32751da177e4SLinus Torvalds }
32761da177e4SLinus Torvalds 
3277e2d59925SNeilBrown freeze_array(conf, 0);
32781da177e4SLinus Torvalds 
32791da177e4SLinus Torvalds /* ok, everything is stopped */
32801da177e4SLinus Torvalds oldpool = conf->r1bio_pool;
32811da177e4SLinus Torvalds conf->r1bio_pool = newpool;
32826ea9c07cSNeilBrown 
3283a88aa786SNeilBrown for (d = d2 = 0; d < conf->raid_disks; d++) {
32843cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[d].rdev;
3285a88aa786SNeilBrown if (rdev && rdev->raid_disk != d2) {
328636fad858SNamhyung Kim sysfs_unlink_rdev(mddev, rdev);
3287a88aa786SNeilBrown rdev->raid_disk = d2;
328836fad858SNamhyung Kim sysfs_unlink_rdev(mddev, rdev);
328936fad858SNamhyung Kim if (sysfs_link_rdev(mddev, rdev))
32901d41c216SNeilBrown pr_warn("md/raid1:%s: cannot register rd%d\n",
329136fad858SNamhyung Kim mdname(mddev), rdev->raid_disk);
3292a88aa786SNeilBrown }
3293a88aa786SNeilBrown if (rdev)
3294a88aa786SNeilBrown newmirrors[d2++].rdev = rdev;
32956ea9c07cSNeilBrown }
32961da177e4SLinus Torvalds kfree(conf->mirrors);
32971da177e4SLinus Torvalds conf->mirrors = newmirrors;
32981da177e4SLinus Torvalds kfree(conf->poolinfo);
32991da177e4SLinus Torvalds conf->poolinfo = newpoolinfo;
33001da177e4SLinus Torvalds 
3301c04be0aaSNeilBrown spin_lock_irqsave(&conf->device_lock, flags);
33021da177e4SLinus Torvalds mddev->degraded += (raid_disks - conf->raid_disks);
3303c04be0aaSNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags);
33041da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks = raid_disks;
330563c70c4fSNeilBrown mddev->delta_disks = 0;
33061da177e4SLinus Torvalds 
3307e2d59925SNeilBrown unfreeze_array(conf);
33081da177e4SLinus Torvalds 
3309985ca973SNeilBrown set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
33101da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
33111da177e4SLinus Torvalds md_wakeup_thread(mddev->thread);
33121da177e4SLinus Torvalds 
3313afeee514SKent Overstreet mempool_exit(&oldpool);
33141da177e4SLinus Torvalds return 0;
33151da177e4SLinus Torvalds }
33161da177e4SLinus Torvalds 
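/*
 * Illustrative sketch (not part of the driver): the d2 loop above packs
 * surviving devices into the low slots.  Shrinking a 4-disk array with a
 * hole at role 1 (values chosen for illustration only):
 *
 *	before:	rd0  (missing)  rd2  rd3
 *	after:	rd0  rd2  rd3		// rd2, rd3 become roles 1, 2
 *
 * Each moved rdev is unlinked from sysfs, given its new raid_disk
 * number and relinked; a shrink that cannot hold all present devices is
 * refused earlier with -EBUSY.
 */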
pr_warn("md/raid1:%s: cannot register rd%d\n", 329136fad858SNamhyung Kim mdname(mddev), rdev->raid_disk); 3292a88aa786SNeilBrown } 3293a88aa786SNeilBrown if (rdev) 3294a88aa786SNeilBrown newmirrors[d2++].rdev = rdev; 32956ea9c07cSNeilBrown } 32961da177e4SLinus Torvalds kfree(conf->mirrors); 32971da177e4SLinus Torvalds conf->mirrors = newmirrors; 32981da177e4SLinus Torvalds kfree(conf->poolinfo); 32991da177e4SLinus Torvalds conf->poolinfo = newpoolinfo; 33001da177e4SLinus Torvalds 3301c04be0aaSNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 33021da177e4SLinus Torvalds mddev->degraded += (raid_disks - conf->raid_disks); 3303c04be0aaSNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 33041da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks = raid_disks; 330563c70c4fSNeilBrown mddev->delta_disks = 0; 33061da177e4SLinus Torvalds 3307e2d59925SNeilBrown unfreeze_array(conf); 33081da177e4SLinus Torvalds 3309985ca973SNeilBrown set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 33101da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 33111da177e4SLinus Torvalds md_wakeup_thread(mddev->thread); 33121da177e4SLinus Torvalds 3313afeee514SKent Overstreet mempool_exit(&oldpool); 33141da177e4SLinus Torvalds return 0; 33151da177e4SLinus Torvalds } 33161da177e4SLinus Torvalds 3317b03e0ccbSNeilBrown static void raid1_quiesce(struct mddev *mddev, int quiesce) 331836fa3063SNeilBrown { 3319e8096360SNeilBrown struct r1conf *conf = mddev->private; 332036fa3063SNeilBrown 3321b03e0ccbSNeilBrown if (quiesce) 332207169fd4Smajianpeng freeze_array(conf, 0); 3323b03e0ccbSNeilBrown else 332407169fd4Smajianpeng unfreeze_array(conf); 332536fa3063SNeilBrown } 332636fa3063SNeilBrown 3327fd01b88cSNeilBrown static void *raid1_takeover(struct mddev *mddev) 3328709ae487SNeilBrown { 3329709ae487SNeilBrown /* raid1 can take over: 3330709ae487SNeilBrown * raid5 with 2 devices, any layout or chunk size 3331709ae487SNeilBrown */ 3332709ae487SNeilBrown if (mddev->level == 5 && mddev->raid_disks == 2) { 3333e8096360SNeilBrown struct r1conf *conf; 3334709ae487SNeilBrown mddev->new_level = 1; 3335709ae487SNeilBrown mddev->new_layout = 0; 3336709ae487SNeilBrown mddev->new_chunk_sectors = 0; 3337709ae487SNeilBrown conf = setup_conf(mddev); 33386995f0b2SShaohua Li if (!IS_ERR(conf)) { 333907169fd4Smajianpeng /* Array must appear to be quiesced */ 334007169fd4Smajianpeng conf->array_frozen = 1; 3341394ed8e4SShaohua Li mddev_clear_unsupported_flags(mddev, 3342394ed8e4SShaohua Li UNSUPPORTED_MDDEV_FLAGS); 33436995f0b2SShaohua Li } 3344709ae487SNeilBrown return conf; 3345709ae487SNeilBrown } 3346709ae487SNeilBrown return ERR_PTR(-EINVAL); 3347709ae487SNeilBrown } 33481da177e4SLinus Torvalds 334984fc4b56SNeilBrown static struct md_personality raid1_personality = 33501da177e4SLinus Torvalds { 33511da177e4SLinus Torvalds .name = "raid1", 33522604b703SNeilBrown .level = 1, 33531da177e4SLinus Torvalds .owner = THIS_MODULE, 3354849674e4SShaohua Li .make_request = raid1_make_request, 3355849674e4SShaohua Li .run = raid1_run, 3356afa0f557SNeilBrown .free = raid1_free, 3357849674e4SShaohua Li .status = raid1_status, 3358849674e4SShaohua Li .error_handler = raid1_error, 33591da177e4SLinus Torvalds .hot_add_disk = raid1_add_disk, 33601da177e4SLinus Torvalds .hot_remove_disk= raid1_remove_disk, 33611da177e4SLinus Torvalds .spare_active = raid1_spare_active, 3362849674e4SShaohua Li .sync_request = raid1_sync_request, 33631da177e4SLinus Torvalds .resize = raid1_resize, 336480c3a6ceSDan Williams .size = raid1_size, 
334984fc4b56SNeilBrown static struct md_personality raid1_personality =
33501da177e4SLinus Torvalds {
33511da177e4SLinus Torvalds .name		= "raid1",
33522604b703SNeilBrown .level		= 1,
33531da177e4SLinus Torvalds .owner		= THIS_MODULE,
3354849674e4SShaohua Li .make_request	= raid1_make_request,
3355849674e4SShaohua Li .run		= raid1_run,
3356afa0f557SNeilBrown .free		= raid1_free,
3357849674e4SShaohua Li .status		= raid1_status,
3358849674e4SShaohua Li .error_handler	= raid1_error,
33591da177e4SLinus Torvalds .hot_add_disk	= raid1_add_disk,
33601da177e4SLinus Torvalds .hot_remove_disk= raid1_remove_disk,
33611da177e4SLinus Torvalds .spare_active	= raid1_spare_active,
3362849674e4SShaohua Li .sync_request	= raid1_sync_request,
33631da177e4SLinus Torvalds .resize		= raid1_resize,
336480c3a6ceSDan Williams .size		= raid1_size,
336563c70c4fSNeilBrown .check_reshape	= raid1_reshape,
336636fa3063SNeilBrown .quiesce	= raid1_quiesce,
3367709ae487SNeilBrown .takeover	= raid1_takeover,
33681da177e4SLinus Torvalds };
33691da177e4SLinus Torvalds 
33701da177e4SLinus Torvalds static int __init raid_init(void)
33711da177e4SLinus Torvalds {
33722604b703SNeilBrown return register_md_personality(&raid1_personality);
33731da177e4SLinus Torvalds }
33741da177e4SLinus Torvalds 
33751da177e4SLinus Torvalds static void raid_exit(void)
33761da177e4SLinus Torvalds {
33772604b703SNeilBrown unregister_md_personality(&raid1_personality);
33781da177e4SLinus Torvalds }
33791da177e4SLinus Torvalds 
33801da177e4SLinus Torvalds module_init(raid_init);
33811da177e4SLinus Torvalds module_exit(raid_exit);
33821da177e4SLinus Torvalds MODULE_LICENSE("GPL");
33830efb9e61SNeilBrown MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
33841da177e4SLinus Torvalds MODULE_ALIAS("md-personality-3"); /* RAID1 */
3385d9d166c2SNeilBrown MODULE_ALIAS("md-raid1");
33862604b703SNeilBrown MODULE_ALIAS("md-level-1");
338734db0cd6SNeilBrown 
338834db0cd6SNeilBrown module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3389