/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *	- bitmap marked during normal i/o
 *	- bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define	NR_RAID1_BIOS 256


static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
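	 * Otherwise the pages are allocated once and shared by every bio in
	 * the r1bio; r1buf_pool_free() below releases each shared page once.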
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			bio->bi_vcnt = i+1;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (j=0 ; j < pi->raid_disks; j++)
		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
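 * The recorded position is what read_balance() compares against an incoming
 * read's target sector when estimating seek distance for each mirror.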
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = bio->bi_private;
	int mirror;
	conf_t *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
			       mdname(conf->mddev),
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}

static void r1_bio_write_done(r1bio_t *r1_bio)
{
	if (atomic_dec_and_test(&r1_bio->remaining))
	{
		/* it really is the end of this request */
		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			/* free extra copy of the data pages */
			int i = r1_bio->behind_page_count;
			while (i--)
				safe_put_page(r1_bio->behind_pages[i]);
			kfree(r1_bio->behind_pages);
			r1_bio->behind_pages = NULL;
		}
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
				r1_bio->sectors,
				!test_bit(R1BIO_Degraded, &r1_bio->state),
				test_bit(R1BIO_BehindIO, &r1_bio->state));
		md_write_end(r1_bio->mddev);
		raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;


	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	r1_bio->bios[mirror] = NULL;
	to_put = bio;
	if (!uptodate) {
		md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
		/* an I/O failed, we can't clear the bitmap */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	} else
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	update_head_pos(mirror, r1_bio);

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks.
		 * Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
				       (unsigned long long) mbio->bi_sector,
				       (unsigned long long) mbio->bi_sector +
				       (mbio->bi_size >> 9) - 1);
				bio_endio(mbio, 0);
			}
		}
	}
	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const sector_t this_sector = r1_bio->sector;
	const int sectors = r1_bio->sectors;
	int start_disk;
	int best_disk;
	int i;
	sector_t best_dist;
	mdk_rdev_t *rdev;
	int choose_first;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
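	 * In outline, the loop below skips faulty or not-yet-recovered
	 * devices, keeps a write-mostly device only as a last resort, stops
	 * early for sequential or idle devices, and otherwise picks the
	 * smallest head-position distance.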
	 */
 retry:
	best_disk = -1;
	best_dist = MaxSector;
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		choose_first = 1;
		start_disk = 0;
	} else {
		choose_first = 0;
		start_disk = conf->last_used;
	}

	for (i = 0 ; i < conf->raid_disks ; i++) {
		sector_t dist;
		int disk = start_disk + i;
		if (disk >= conf->raid_disks)
			disk -= conf->raid_disks;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0)
				best_disk = disk;
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first
		    /* Don't change to another disk for sequential reads */
		    || conf->next_seq_sect == this_sector
		    || dist == 0
		    /* If device is idle, use it */
		    || atomic_read(&rdev->nr_pending) == 0) {
			best_disk = disk;
			break;
		}
		if (dist < best_dist) {
			best_dist = dist;
			best_disk = disk;
		}
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = best_disk;
	}
	rcu_read_unlock();

	return best_disk;
}

static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev->private;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}


static void flush_pending_writes(conf_t *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock, );

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock, );

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    );
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
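	 * While waiting, queued-but-unsubmitted writes are pushed out via
	 * flush_pending_writes() so they can complete and drop nr_pending.
	 * unfreeze_array() below reverses the counts and wakes the waiters.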
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O
 */
static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*),
				      GFP_NOIO);
	if (unlikely(!pages))
		return;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_pages = pages;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (pages[i])
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}

static int make_request(mddev_t *mddev, struct bio * bio)
{
	conf_t *conf = mddev->private;
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	mdk_rdev_t *blocked_rdev;
	int plugged;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
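	 * wait_barrier() below provides that gate: it blocks while a barrier
	 * raised by raise_barrier() is up, then counts us in nr_pending.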
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
	    bio->bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
			    bio->bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	plugged = mddev_check_plugged(mddev);

	disks = conf->raid_disks;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else {
				r1_bio->bios[i] = bio;
				targets++;
			}
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);

		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ?
	 * Not if there are too many, or cannot allocate memory,
	 * or a reader on WriteMostly is waiting for behind writes
	 * to flush */
	if (bitmap &&
	    (atomic_read(&bitmap->behind_writes)
	     < mddev->bitmap_info.max_write_behind) &&
	    !waitqueue_active(&bitmap->behind_wait))
		alloc_behind_pages(bio, r1_bio);

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
			  test_bit(R1BIO_BehindIO, &r1_bio->state));
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw = WRITE | do_flush_fua | do_sync;
		mbio->bi_private = r1_bio;

		if (r1_bio->behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = r1_bio->behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync || !bitmap || !plugged)
		md_wakeup_thread(mddev->thread);

	return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
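		 * Setting mddev->recovery_disabled below records that
		 * decision; raid1_remove_disk() consults it when deciding
		 * whether a non-faulty device may be removed.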
		 */
		mddev->recovery_disabled = 1;
		return;
	}
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
	       "md/raid1:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk(KERN_DEBUG "RAID1 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
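	 * The return value is the number of devices newly marked In_sync;
	 * mddev->degraded is reduced by the same count under device_lock.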
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent(rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}


static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	mirror_info_t *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	for (mirror = first; mirror <= last; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			/* as we don't honour merge_bvec_fn, we must
			 * never risk violating it, so limit
			 * ->max_segments to one lying within a single
			 * page, as a one page request is never in
			 * violation.
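			 * (The settings below keep every request within a
			 * single page, which no merge_bvec_fn can reject.)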
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);
			}

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	md_integrity_add_rdev(rdev, mddev);
	print_conf(conf);
	return err;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    !mddev->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		}
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}


static void end_sync_read(struct bio *bio, int error)
{
	r1bio_t *r1_bio = bio->bi_private;
	int i;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = bio->bi_private;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev->private;
	int i;
	int mirror=0;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		sector_t sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	}

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		sector_t s = r1_bio->sectors;
		put_buf(r1_bio);
		md_done_sync(mddev, s, uptodate);
	}
}

static int fix_sync_read_error(r1bio_t *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
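	 * The loop below works a page at a time: find any mirror that can
	 * supply the data, write it back to the other mirrors, then re-read
	 * from them to confirm the repair.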
11901da177e4SLinus Torvalds */ 1191a68e5870SNeilBrown mddev_t *mddev = r1_bio->mddev; 1192a68e5870SNeilBrown conf_t *conf = mddev->private; 1193a68e5870SNeilBrown struct bio *bio = r1_bio->bios[r1_bio->read_disk]; 119469382e85SNeilBrown sector_t sect = r1_bio->sector; 119569382e85SNeilBrown int sectors = r1_bio->sectors; 119669382e85SNeilBrown int idx = 0; 119769382e85SNeilBrown 119869382e85SNeilBrown while(sectors) { 119969382e85SNeilBrown int s = sectors; 120069382e85SNeilBrown int d = r1_bio->read_disk; 120169382e85SNeilBrown int success = 0; 120269382e85SNeilBrown mdk_rdev_t *rdev; 120378d7f5f7SNeilBrown int start; 120469382e85SNeilBrown 120569382e85SNeilBrown if (s > (PAGE_SIZE>>9)) 120669382e85SNeilBrown s = PAGE_SIZE >> 9; 120769382e85SNeilBrown do { 120869382e85SNeilBrown if (r1_bio->bios[d]->bi_end_io == end_sync_read) { 1209ddac7c7eSNeilBrown /* No rcu protection needed here devices 1210ddac7c7eSNeilBrown * can only be removed when no resync is 1211ddac7c7eSNeilBrown * active, and resync is currently active 1212ddac7c7eSNeilBrown */ 121369382e85SNeilBrown rdev = conf->mirrors[d].rdev; 12142b193363SNeilBrown if (sync_page_io(rdev, 1215ccebd4c4SJonathan Brassow sect, 121669382e85SNeilBrown s<<9, 121769382e85SNeilBrown bio->bi_io_vec[idx].bv_page, 1218ccebd4c4SJonathan Brassow READ, false)) { 121969382e85SNeilBrown success = 1; 122069382e85SNeilBrown break; 122169382e85SNeilBrown } 122269382e85SNeilBrown } 122369382e85SNeilBrown d++; 122469382e85SNeilBrown if (d == conf->raid_disks) 122569382e85SNeilBrown d = 0; 122669382e85SNeilBrown } while (!success && d != r1_bio->read_disk); 122769382e85SNeilBrown 122878d7f5f7SNeilBrown if (!success) { 122978d7f5f7SNeilBrown char b[BDEVNAME_SIZE]; 123078d7f5f7SNeilBrown /* Cannot read from anywhere, array is toast */ 123178d7f5f7SNeilBrown md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 123278d7f5f7SNeilBrown printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" 123378d7f5f7SNeilBrown " for block %llu\n", 123478d7f5f7SNeilBrown mdname(mddev), 123578d7f5f7SNeilBrown bdevname(bio->bi_bdev, b), 123678d7f5f7SNeilBrown (unsigned long long)r1_bio->sector); 123778d7f5f7SNeilBrown md_done_sync(mddev, r1_bio->sectors, 0); 123878d7f5f7SNeilBrown put_buf(r1_bio); 123978d7f5f7SNeilBrown return 0; 124078d7f5f7SNeilBrown } 124178d7f5f7SNeilBrown 124278d7f5f7SNeilBrown start = d; 124369382e85SNeilBrown /* write it back and re-read */ 124469382e85SNeilBrown while (d != r1_bio->read_disk) { 124569382e85SNeilBrown if (d == 0) 124669382e85SNeilBrown d = conf->raid_disks; 124769382e85SNeilBrown d--; 124869382e85SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 124969382e85SNeilBrown continue; 125069382e85SNeilBrown rdev = conf->mirrors[d].rdev; 12512b193363SNeilBrown if (sync_page_io(rdev, 1252ccebd4c4SJonathan Brassow sect, 125369382e85SNeilBrown s<<9, 125469382e85SNeilBrown bio->bi_io_vec[idx].bv_page, 125578d7f5f7SNeilBrown WRITE, false) == 0) { 125678d7f5f7SNeilBrown r1_bio->bios[d]->bi_end_io = NULL; 125778d7f5f7SNeilBrown rdev_dec_pending(rdev, mddev); 1258097426f6SNeilBrown md_error(mddev, rdev); 125978d7f5f7SNeilBrown } else 126078d7f5f7SNeilBrown atomic_add(s, &rdev->corrected_errors); 1261097426f6SNeilBrown } 1262097426f6SNeilBrown d = start; 1263097426f6SNeilBrown while (d != r1_bio->read_disk) { 1264097426f6SNeilBrown if (d == 0) 1265097426f6SNeilBrown d = conf->raid_disks; 1266097426f6SNeilBrown d--; 1267097426f6SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1268097426f6SNeilBrown continue; 
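/* Second pass: re-read the freshly rewritten block from each of those
 * mirrors to verify it; if the read-back below fails, the device is
 * treated as dead via md_error().
 */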
1269097426f6SNeilBrown rdev = conf->mirrors[d].rdev; 12702b193363SNeilBrown if (sync_page_io(rdev, 1271ccebd4c4SJonathan Brassow sect, 127269382e85SNeilBrown s<<9, 127369382e85SNeilBrown bio->bi_io_vec[idx].bv_page, 1274ccebd4c4SJonathan Brassow READ, false) == 0) 127569382e85SNeilBrown md_error(mddev, rdev); 127669382e85SNeilBrown } 127769382e85SNeilBrown sectors -= s; 127869382e85SNeilBrown sect += s; 127969382e85SNeilBrown idx ++; 128069382e85SNeilBrown } 128178d7f5f7SNeilBrown set_bit(R1BIO_Uptodate, &r1_bio->state); 12827ca78d57SNeilBrown set_bit(BIO_UPTODATE, &bio->bi_flags); 1283a68e5870SNeilBrown return 1; 128469382e85SNeilBrown } 1285d11c171eSNeilBrown 1286a68e5870SNeilBrown static int process_checks(r1bio_t *r1_bio) 1287a68e5870SNeilBrown { 1288a68e5870SNeilBrown /* We have read all readable devices. If we haven't 1289a68e5870SNeilBrown * got the block, then there is no hope left. 1290a68e5870SNeilBrown * If we have, then we want to do a comparison 1291a68e5870SNeilBrown * and skip the write if everything is the same. 1292a68e5870SNeilBrown * If any blocks failed to read, then we need to 1293a68e5870SNeilBrown * attempt an over-write 1294a68e5870SNeilBrown */ 1295a68e5870SNeilBrown mddev_t *mddev = r1_bio->mddev; 1296a68e5870SNeilBrown conf_t *conf = mddev->private; 1297a68e5870SNeilBrown int primary; 1298a68e5870SNeilBrown int i; 1299a68e5870SNeilBrown 130078d7f5f7SNeilBrown for (primary = 0; primary < conf->raid_disks; primary++) 1301a68e5870SNeilBrown if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 1302a68e5870SNeilBrown test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { 1303a68e5870SNeilBrown r1_bio->bios[primary]->bi_end_io = NULL; 1304a68e5870SNeilBrown rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 1305a68e5870SNeilBrown break; 1306a68e5870SNeilBrown } 1307a68e5870SNeilBrown r1_bio->read_disk = primary; 130878d7f5f7SNeilBrown for (i = 0; i < conf->raid_disks; i++) { 1309a68e5870SNeilBrown int j; 1310a68e5870SNeilBrown int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9); 1311a68e5870SNeilBrown struct bio *pbio = r1_bio->bios[primary]; 1312a68e5870SNeilBrown struct bio *sbio = r1_bio->bios[i]; 131378d7f5f7SNeilBrown int size; 131478d7f5f7SNeilBrown 131578d7f5f7SNeilBrown if (r1_bio->bios[i]->bi_end_io != end_sync_read) 131678d7f5f7SNeilBrown continue; 1317a68e5870SNeilBrown 1318a68e5870SNeilBrown if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { 1319a68e5870SNeilBrown for (j = vcnt; j-- ; ) { 1320a68e5870SNeilBrown struct page *p, *s; 1321a68e5870SNeilBrown p = pbio->bi_io_vec[j].bv_page; 1322a68e5870SNeilBrown s = sbio->bi_io_vec[j].bv_page; 1323a68e5870SNeilBrown if (memcmp(page_address(p), 1324a68e5870SNeilBrown page_address(s), 1325a68e5870SNeilBrown PAGE_SIZE)) 1326a68e5870SNeilBrown break; 1327a68e5870SNeilBrown } 1328a68e5870SNeilBrown } else 1329a68e5870SNeilBrown j = 0; 1330a68e5870SNeilBrown if (j >= 0) 1331a68e5870SNeilBrown mddev->resync_mismatches += r1_bio->sectors; 1332a68e5870SNeilBrown if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 1333a68e5870SNeilBrown && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { 133478d7f5f7SNeilBrown /* No need to write to this device. 
*/ 1335a68e5870SNeilBrown sbio->bi_end_io = NULL; 1336a68e5870SNeilBrown rdev_dec_pending(conf->mirrors[i].rdev, mddev); 133778d7f5f7SNeilBrown continue; 133878d7f5f7SNeilBrown } 1339a68e5870SNeilBrown /* fixup the bio for reuse */ 1340a68e5870SNeilBrown sbio->bi_vcnt = vcnt; 1341a68e5870SNeilBrown sbio->bi_size = r1_bio->sectors << 9; 1342a68e5870SNeilBrown sbio->bi_idx = 0; 1343a68e5870SNeilBrown sbio->bi_phys_segments = 0; 1344a68e5870SNeilBrown sbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1345a68e5870SNeilBrown sbio->bi_flags |= 1 << BIO_UPTODATE; 1346a68e5870SNeilBrown sbio->bi_next = NULL; 1347a68e5870SNeilBrown sbio->bi_sector = r1_bio->sector + 1348a68e5870SNeilBrown conf->mirrors[i].rdev->data_offset; 1349a68e5870SNeilBrown sbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1350a68e5870SNeilBrown size = sbio->bi_size; 1351a68e5870SNeilBrown for (j = 0; j < vcnt ; j++) { 1352a68e5870SNeilBrown struct bio_vec *bi; 1353a68e5870SNeilBrown bi = &sbio->bi_io_vec[j]; 1354a68e5870SNeilBrown bi->bv_offset = 0; 1355a68e5870SNeilBrown if (size > PAGE_SIZE) 1356a68e5870SNeilBrown bi->bv_len = PAGE_SIZE; 1357a68e5870SNeilBrown else 1358a68e5870SNeilBrown bi->bv_len = size; 1359a68e5870SNeilBrown size -= PAGE_SIZE; 1360a68e5870SNeilBrown memcpy(page_address(bi->bv_page), 1361a68e5870SNeilBrown page_address(pbio->bi_io_vec[j].bv_page), 1362a68e5870SNeilBrown PAGE_SIZE); 1363a68e5870SNeilBrown } 1364a68e5870SNeilBrown } 1365a68e5870SNeilBrown return 0; 1366a68e5870SNeilBrown } 1367a68e5870SNeilBrown 1368a68e5870SNeilBrown static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1369a68e5870SNeilBrown { 1370a68e5870SNeilBrown conf_t *conf = mddev->private; 1371a68e5870SNeilBrown int i; 1372a68e5870SNeilBrown int disks = conf->raid_disks; 1373a68e5870SNeilBrown struct bio *bio, *wbio; 1374a68e5870SNeilBrown 1375a68e5870SNeilBrown bio = r1_bio->bios[r1_bio->read_disk]; 1376a68e5870SNeilBrown 1377a68e5870SNeilBrown if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 1378a68e5870SNeilBrown /* ouch - failed to read all of that. 
*/
1379a68e5870SNeilBrown if (!fix_sync_read_error(r1_bio))
1380a68e5870SNeilBrown return;
13817ca78d57SNeilBrown
13827ca78d57SNeilBrown if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
13837ca78d57SNeilBrown if (process_checks(r1_bio) < 0)
13847ca78d57SNeilBrown return;
1385d11c171eSNeilBrown /*
1386d11c171eSNeilBrown * schedule writes
1387d11c171eSNeilBrown */
13881da177e4SLinus Torvalds atomic_set(&r1_bio->remaining, 1);
13891da177e4SLinus Torvalds for (i = 0; i < disks ; i++) {
13901da177e4SLinus Torvalds wbio = r1_bio->bios[i];
13913e198f78SNeilBrown if (wbio->bi_end_io == NULL ||
13923e198f78SNeilBrown (wbio->bi_end_io == end_sync_read &&
13933e198f78SNeilBrown (i == r1_bio->read_disk ||
13943e198f78SNeilBrown !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
13951da177e4SLinus Torvalds continue;
13961da177e4SLinus Torvalds
13973e198f78SNeilBrown wbio->bi_rw = WRITE;
13983e198f78SNeilBrown wbio->bi_end_io = end_sync_write;
13991da177e4SLinus Torvalds atomic_inc(&r1_bio->remaining);
14001da177e4SLinus Torvalds md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1401191ea9b2SNeilBrown
14021da177e4SLinus Torvalds generic_make_request(wbio);
14031da177e4SLinus Torvalds }
14041da177e4SLinus Torvalds
14051da177e4SLinus Torvalds if (atomic_dec_and_test(&r1_bio->remaining)) {
1406191ea9b2SNeilBrown /* if we're here, all write(s) have completed, so clean up */
14071da177e4SLinus Torvalds md_done_sync(mddev, r1_bio->sectors, 1);
14081da177e4SLinus Torvalds put_buf(r1_bio);
14091da177e4SLinus Torvalds }
14101da177e4SLinus Torvalds }
14111da177e4SLinus Torvalds
14121da177e4SLinus Torvalds /*
14131da177e4SLinus Torvalds * This is a kernel thread which:
14141da177e4SLinus Torvalds *
14151da177e4SLinus Torvalds * 1. Retries failed read operations on working mirrors.
14161da177e4SLinus Torvalds * 2. Updates the raid superblock when problems are encountered.
14171da177e4SLinus Torvalds * 3. Performs writes following reads for array synchronising.
14181da177e4SLinus Torvalds */
14191da177e4SLinus Torvalds
1420867868fbSNeilBrown static void fix_read_error(conf_t *conf, int read_disk,
1421867868fbSNeilBrown sector_t sect, int sectors)
1422867868fbSNeilBrown {
1423867868fbSNeilBrown mddev_t *mddev = conf->mddev;
1424867868fbSNeilBrown while(sectors) {
1425867868fbSNeilBrown int s = sectors;
1426867868fbSNeilBrown int d = read_disk;
1427867868fbSNeilBrown int success = 0;
1428867868fbSNeilBrown int start;
1429867868fbSNeilBrown mdk_rdev_t *rdev;
1430867868fbSNeilBrown
1431867868fbSNeilBrown if (s > (PAGE_SIZE>>9))
1432867868fbSNeilBrown s = PAGE_SIZE >> 9;
1433867868fbSNeilBrown
1434867868fbSNeilBrown do {
1435867868fbSNeilBrown /* Note: no rcu protection needed here
1436867868fbSNeilBrown * as this is synchronous in the raid1d thread
1437867868fbSNeilBrown * which is the thread that might remove
1438867868fbSNeilBrown * a device. If raid1d ever becomes multi-threaded....
1439867868fbSNeilBrown */ 1440867868fbSNeilBrown rdev = conf->mirrors[d].rdev; 1441867868fbSNeilBrown if (rdev && 1442867868fbSNeilBrown test_bit(In_sync, &rdev->flags) && 1443ccebd4c4SJonathan Brassow sync_page_io(rdev, sect, s<<9, 1444ccebd4c4SJonathan Brassow conf->tmppage, READ, false)) 1445867868fbSNeilBrown success = 1; 1446867868fbSNeilBrown else { 1447867868fbSNeilBrown d++; 1448867868fbSNeilBrown if (d == conf->raid_disks) 1449867868fbSNeilBrown d = 0; 1450867868fbSNeilBrown } 1451867868fbSNeilBrown } while (!success && d != read_disk); 1452867868fbSNeilBrown 1453867868fbSNeilBrown if (!success) { 1454867868fbSNeilBrown /* Cannot read from anywhere -- bye bye array */ 1455867868fbSNeilBrown md_error(mddev, conf->mirrors[read_disk].rdev); 1456867868fbSNeilBrown break; 1457867868fbSNeilBrown } 1458867868fbSNeilBrown /* write it back and re-read */ 1459867868fbSNeilBrown start = d; 1460867868fbSNeilBrown while (d != read_disk) { 1461867868fbSNeilBrown if (d==0) 1462867868fbSNeilBrown d = conf->raid_disks; 1463867868fbSNeilBrown d--; 1464867868fbSNeilBrown rdev = conf->mirrors[d].rdev; 1465867868fbSNeilBrown if (rdev && 1466867868fbSNeilBrown test_bit(In_sync, &rdev->flags)) { 1467ccebd4c4SJonathan Brassow if (sync_page_io(rdev, sect, s<<9, 1468ccebd4c4SJonathan Brassow conf->tmppage, WRITE, false) 1469867868fbSNeilBrown == 0) 1470867868fbSNeilBrown /* Well, this device is dead */ 1471867868fbSNeilBrown md_error(mddev, rdev); 1472867868fbSNeilBrown } 1473867868fbSNeilBrown } 1474867868fbSNeilBrown d = start; 1475867868fbSNeilBrown while (d != read_disk) { 1476867868fbSNeilBrown char b[BDEVNAME_SIZE]; 1477867868fbSNeilBrown if (d==0) 1478867868fbSNeilBrown d = conf->raid_disks; 1479867868fbSNeilBrown d--; 1480867868fbSNeilBrown rdev = conf->mirrors[d].rdev; 1481867868fbSNeilBrown if (rdev && 1482867868fbSNeilBrown test_bit(In_sync, &rdev->flags)) { 1483ccebd4c4SJonathan Brassow if (sync_page_io(rdev, sect, s<<9, 1484ccebd4c4SJonathan Brassow conf->tmppage, READ, false) 1485867868fbSNeilBrown == 0) 1486867868fbSNeilBrown /* Well, this device is dead */ 1487867868fbSNeilBrown md_error(mddev, rdev); 1488867868fbSNeilBrown else { 1489867868fbSNeilBrown atomic_add(s, &rdev->corrected_errors); 1490867868fbSNeilBrown printk(KERN_INFO 14919dd1e2faSNeilBrown "md/raid1:%s: read error corrected " 1492867868fbSNeilBrown "(%d sectors at %llu on %s)\n", 1493867868fbSNeilBrown mdname(mddev), s, 1494969b755aSRandy Dunlap (unsigned long long)(sect + 1495969b755aSRandy Dunlap rdev->data_offset), 1496867868fbSNeilBrown bdevname(rdev->bdev, b)); 1497867868fbSNeilBrown } 1498867868fbSNeilBrown } 1499867868fbSNeilBrown } 1500867868fbSNeilBrown sectors -= s; 1501867868fbSNeilBrown sect += s; 1502867868fbSNeilBrown } 1503867868fbSNeilBrown } 1504867868fbSNeilBrown 15051da177e4SLinus Torvalds static void raid1d(mddev_t *mddev) 15061da177e4SLinus Torvalds { 15071da177e4SLinus Torvalds r1bio_t *r1_bio; 15081da177e4SLinus Torvalds struct bio *bio; 15091da177e4SLinus Torvalds unsigned long flags; 1510070ec55dSNeilBrown conf_t *conf = mddev->private; 15111da177e4SLinus Torvalds struct list_head *head = &conf->retry_list; 15121da177e4SLinus Torvalds mdk_rdev_t *rdev; 1513e1dfa0a2SNeilBrown struct blk_plug plug; 15141da177e4SLinus Torvalds 15151da177e4SLinus Torvalds md_check_recovery(mddev); 15161da177e4SLinus Torvalds 1517e1dfa0a2SNeilBrown blk_start_plug(&plug); 15181da177e4SLinus Torvalds for (;;) { 15191da177e4SLinus Torvalds char b[BDEVNAME_SIZE]; 1520a35e63efSNeilBrown 1521c3b328acSNeilBrown if 
(atomic_read(&mddev->plug_cnt) == 0) 15227eaceaccSJens Axboe flush_pending_writes(conf); 1523a35e63efSNeilBrown 15241da177e4SLinus Torvalds spin_lock_irqsave(&conf->device_lock, flags); 1525a35e63efSNeilBrown if (list_empty(head)) { 1526191ea9b2SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 15271da177e4SLinus Torvalds break; 1528a35e63efSNeilBrown } 15291da177e4SLinus Torvalds r1_bio = list_entry(head->prev, r1bio_t, retry_list); 15301da177e4SLinus Torvalds list_del(head->prev); 1531ddaf22abSNeilBrown conf->nr_queued--; 15321da177e4SLinus Torvalds spin_unlock_irqrestore(&conf->device_lock, flags); 15331da177e4SLinus Torvalds 15341da177e4SLinus Torvalds mddev = r1_bio->mddev; 1535070ec55dSNeilBrown conf = mddev->private; 15367eaceaccSJens Axboe if (test_bit(R1BIO_IsSync, &r1_bio->state)) 15371da177e4SLinus Torvalds sync_request_write(mddev, r1_bio); 15387eaceaccSJens Axboe else { 15391da177e4SLinus Torvalds int disk; 1540ddaf22abSNeilBrown 1541ddaf22abSNeilBrown /* we got a read error. Maybe the drive is bad. Maybe just 1542ddaf22abSNeilBrown * the block and we can fix it. 1543ddaf22abSNeilBrown * We freeze all other IO, and try reading the block from 1544ddaf22abSNeilBrown * other devices. When we find one, we re-write 1545ddaf22abSNeilBrown * and check it that fixes the read error. 1546ddaf22abSNeilBrown * This is all done synchronously while the array is 1547ddaf22abSNeilBrown * frozen 1548ddaf22abSNeilBrown */ 1549867868fbSNeilBrown if (mddev->ro == 0) { 1550ddaf22abSNeilBrown freeze_array(conf); 1551867868fbSNeilBrown fix_read_error(conf, r1_bio->read_disk, 1552867868fbSNeilBrown r1_bio->sector, 1553867868fbSNeilBrown r1_bio->sectors); 1554ddaf22abSNeilBrown unfreeze_array(conf); 1555d0e26078SNeilBrown } else 1556d0e26078SNeilBrown md_error(mddev, 1557d0e26078SNeilBrown conf->mirrors[r1_bio->read_disk].rdev); 1558ddaf22abSNeilBrown 15591da177e4SLinus Torvalds bio = r1_bio->bios[r1_bio->read_disk]; 1560d0e26078SNeilBrown if ((disk=read_balance(conf, r1_bio)) == -1) { 15619dd1e2faSNeilBrown printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" 15621da177e4SLinus Torvalds " read error for block %llu\n", 15639dd1e2faSNeilBrown mdname(mddev), 15641da177e4SLinus Torvalds bdevname(bio->bi_bdev,b), 15651da177e4SLinus Torvalds (unsigned long long)r1_bio->sector); 15661da177e4SLinus Torvalds raid_end_bio_io(r1_bio); 15671da177e4SLinus Torvalds } else { 15682c7d46ecSNeilBrown const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; 1569cf30a473SNeilBrown r1_bio->bios[r1_bio->read_disk] = 1570cf30a473SNeilBrown mddev->ro ? 
IO_BLOCKED : NULL; 15711da177e4SLinus Torvalds r1_bio->read_disk = disk; 15721da177e4SLinus Torvalds bio_put(bio); 1573a167f663SNeilBrown bio = bio_clone_mddev(r1_bio->master_bio, 1574a167f663SNeilBrown GFP_NOIO, mddev); 15751da177e4SLinus Torvalds r1_bio->bios[r1_bio->read_disk] = bio; 15761da177e4SLinus Torvalds rdev = conf->mirrors[disk].rdev; 15771da177e4SLinus Torvalds if (printk_ratelimit()) 15789dd1e2faSNeilBrown printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to" 1579d754c5aeSNeilBrown " other mirror: %s\n", 15809dd1e2faSNeilBrown mdname(mddev), 1581d754c5aeSNeilBrown (unsigned long long)r1_bio->sector, 1582d754c5aeSNeilBrown bdevname(rdev->bdev,b)); 15831da177e4SLinus Torvalds bio->bi_sector = r1_bio->sector + rdev->data_offset; 15841da177e4SLinus Torvalds bio->bi_bdev = rdev->bdev; 15851da177e4SLinus Torvalds bio->bi_end_io = raid1_end_read_request; 15867b6d91daSChristoph Hellwig bio->bi_rw = READ | do_sync; 15871da177e4SLinus Torvalds bio->bi_private = r1_bio; 15881da177e4SLinus Torvalds generic_make_request(bio); 15891da177e4SLinus Torvalds } 15901da177e4SLinus Torvalds } 15911d9d5241SNeilBrown cond_resched(); 15921da177e4SLinus Torvalds } 1593e1dfa0a2SNeilBrown blk_finish_plug(&plug); 15941da177e4SLinus Torvalds } 15951da177e4SLinus Torvalds 15961da177e4SLinus Torvalds 15971da177e4SLinus Torvalds static int init_resync(conf_t *conf) 15981da177e4SLinus Torvalds { 15991da177e4SLinus Torvalds int buffs; 16001da177e4SLinus Torvalds 16011da177e4SLinus Torvalds buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 16029e77c485SEric Sesterhenn BUG_ON(conf->r1buf_pool); 16031da177e4SLinus Torvalds conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, 16041da177e4SLinus Torvalds conf->poolinfo); 16051da177e4SLinus Torvalds if (!conf->r1buf_pool) 16061da177e4SLinus Torvalds return -ENOMEM; 16071da177e4SLinus Torvalds conf->next_resync = 0; 16081da177e4SLinus Torvalds return 0; 16091da177e4SLinus Torvalds } 16101da177e4SLinus Torvalds 16111da177e4SLinus Torvalds /* 16121da177e4SLinus Torvalds * perform a "sync" on one "block" 16131da177e4SLinus Torvalds * 16141da177e4SLinus Torvalds * We need to make sure that no normal I/O request - particularly write 16151da177e4SLinus Torvalds * requests - conflict with active sync requests. 16161da177e4SLinus Torvalds * 16171da177e4SLinus Torvalds * This is achieved by tracking pending requests and a 'barrier' concept 16181da177e4SLinus Torvalds * that can be installed to exclude normal IO requests. 
16191da177e4SLinus Torvalds */
16201da177e4SLinus Torvalds
162157afd89fSNeilBrown static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
16221da177e4SLinus Torvalds {
1623070ec55dSNeilBrown conf_t *conf = mddev->private;
16241da177e4SLinus Torvalds r1bio_t *r1_bio;
16251da177e4SLinus Torvalds struct bio *bio;
16261da177e4SLinus Torvalds sector_t max_sector, nr_sectors;
16273e198f78SNeilBrown int disk = -1;
16281da177e4SLinus Torvalds int i;
16293e198f78SNeilBrown int wonly = -1;
16303e198f78SNeilBrown int write_targets = 0, read_targets = 0;
163157dab0bdSNeilBrown sector_t sync_blocks;
1632e3b9703eSNeilBrown int still_degraded = 0;
16331da177e4SLinus Torvalds
16341da177e4SLinus Torvalds if (!conf->r1buf_pool)
16351da177e4SLinus Torvalds if (init_resync(conf))
163657afd89fSNeilBrown return 0;
16371da177e4SLinus Torvalds
163858c0fed4SAndre Noll max_sector = mddev->dev_sectors;
16391da177e4SLinus Torvalds if (sector_nr >= max_sector) {
1640191ea9b2SNeilBrown /* If we aborted, we need to abort the
1641191ea9b2SNeilBrown * sync on the 'current' bitmap chunk (there will
1642191ea9b2SNeilBrown * only be one in raid1 resync).
1643191ea9b2SNeilBrown * We can find the current address in mddev->curr_resync
1644191ea9b2SNeilBrown */
16456a806c51SNeilBrown if (mddev->curr_resync < max_sector) /* aborted */
16466a806c51SNeilBrown bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1647191ea9b2SNeilBrown &sync_blocks, 1);
16486a806c51SNeilBrown else /* completed sync */
1649191ea9b2SNeilBrown conf->fullsync = 0;
16506a806c51SNeilBrown
16516a806c51SNeilBrown bitmap_close_sync(mddev->bitmap);
16521da177e4SLinus Torvalds close_sync(conf);
16531da177e4SLinus Torvalds return 0;
16541da177e4SLinus Torvalds }
16551da177e4SLinus Torvalds
165607d84d10SNeilBrown if (mddev->bitmap == NULL &&
165707d84d10SNeilBrown mddev->recovery_cp == MaxSector &&
16586394cca5SNeilBrown !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
165907d84d10SNeilBrown conf->fullsync == 0) {
166007d84d10SNeilBrown *skipped = 1;
166107d84d10SNeilBrown return max_sector - sector_nr;
166207d84d10SNeilBrown }
16636394cca5SNeilBrown /* before building a request, check if we can skip these blocks..
16646394cca5SNeilBrown * This call to bitmap_start_sync doesn't actually record anything
16656394cca5SNeilBrown */
1666e3b9703eSNeilBrown if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1667e5de485fSNeilBrown !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1668191ea9b2SNeilBrown /* We can skip this block, and probably several more */
1669191ea9b2SNeilBrown *skipped = 1;
1670191ea9b2SNeilBrown return sync_blocks;
1671191ea9b2SNeilBrown }
16721da177e4SLinus Torvalds /*
167317999be4SNeilBrown * If there is non-resync activity waiting for a turn,
167417999be4SNeilBrown * and resync is going fast enough,
167517999be4SNeilBrown * then let it through before starting on this new sync request.
16761da177e4SLinus Torvalds */ 167717999be4SNeilBrown if (!go_faster && conf->nr_waiting) 16781da177e4SLinus Torvalds msleep_interruptible(1000); 167917999be4SNeilBrown 1680b47490c9SNeilBrown bitmap_cond_end_sync(mddev->bitmap, sector_nr); 16811c4588e9SNeilBrown r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 168217999be4SNeilBrown raise_barrier(conf); 168317999be4SNeilBrown 168417999be4SNeilBrown conf->next_resync = sector_nr; 16851da177e4SLinus Torvalds 16863e198f78SNeilBrown rcu_read_lock(); 16873e198f78SNeilBrown /* 16883e198f78SNeilBrown * If we get a correctably read error during resync or recovery, 16893e198f78SNeilBrown * we might want to read from a different device. So we 16903e198f78SNeilBrown * flag all drives that could conceivably be read from for READ, 16913e198f78SNeilBrown * and any others (which will be non-In_sync devices) for WRITE. 16923e198f78SNeilBrown * If a read fails, we try reading from something else for which READ 16933e198f78SNeilBrown * is OK. 16943e198f78SNeilBrown */ 16951da177e4SLinus Torvalds 16961da177e4SLinus Torvalds r1_bio->mddev = mddev; 16971da177e4SLinus Torvalds r1_bio->sector = sector_nr; 1698191ea9b2SNeilBrown r1_bio->state = 0; 16991da177e4SLinus Torvalds set_bit(R1BIO_IsSync, &r1_bio->state); 17001da177e4SLinus Torvalds 17011da177e4SLinus Torvalds for (i=0; i < conf->raid_disks; i++) { 17023e198f78SNeilBrown mdk_rdev_t *rdev; 17031da177e4SLinus Torvalds bio = r1_bio->bios[i]; 17041da177e4SLinus Torvalds 17051da177e4SLinus Torvalds /* take from bio_init */ 17061da177e4SLinus Torvalds bio->bi_next = NULL; 1707db8d9d35SNeilBrown bio->bi_flags &= ~(BIO_POOL_MASK-1); 17081da177e4SLinus Torvalds bio->bi_flags |= 1 << BIO_UPTODATE; 1709db8d9d35SNeilBrown bio->bi_comp_cpu = -1; 1710802ba064SNeilBrown bio->bi_rw = READ; 17111da177e4SLinus Torvalds bio->bi_vcnt = 0; 17121da177e4SLinus Torvalds bio->bi_idx = 0; 17131da177e4SLinus Torvalds bio->bi_phys_segments = 0; 17141da177e4SLinus Torvalds bio->bi_size = 0; 17151da177e4SLinus Torvalds bio->bi_end_io = NULL; 17161da177e4SLinus Torvalds bio->bi_private = NULL; 17171da177e4SLinus Torvalds 17183e198f78SNeilBrown rdev = rcu_dereference(conf->mirrors[i].rdev); 17193e198f78SNeilBrown if (rdev == NULL || 17203e198f78SNeilBrown test_bit(Faulty, &rdev->flags)) { 1721e3b9703eSNeilBrown still_degraded = 1; 1722e3b9703eSNeilBrown continue; 17233e198f78SNeilBrown } else if (!test_bit(In_sync, &rdev->flags)) { 17241da177e4SLinus Torvalds bio->bi_rw = WRITE; 17251da177e4SLinus Torvalds bio->bi_end_io = end_sync_write; 17261da177e4SLinus Torvalds write_targets ++; 17273e198f78SNeilBrown } else { 17283e198f78SNeilBrown /* may need to read from here */ 17293e198f78SNeilBrown bio->bi_rw = READ; 17303e198f78SNeilBrown bio->bi_end_io = end_sync_read; 17313e198f78SNeilBrown if (test_bit(WriteMostly, &rdev->flags)) { 17323e198f78SNeilBrown if (wonly < 0) 17333e198f78SNeilBrown wonly = i; 17343e198f78SNeilBrown } else { 17353e198f78SNeilBrown if (disk < 0) 17363e198f78SNeilBrown disk = i; 17373e198f78SNeilBrown } 17383e198f78SNeilBrown read_targets++; 17393e198f78SNeilBrown } 17403e198f78SNeilBrown atomic_inc(&rdev->nr_pending); 17413e198f78SNeilBrown bio->bi_sector = sector_nr + rdev->data_offset; 17423e198f78SNeilBrown bio->bi_bdev = rdev->bdev; 17431da177e4SLinus Torvalds bio->bi_private = r1_bio; 17441da177e4SLinus Torvalds } 17453e198f78SNeilBrown rcu_read_unlock(); 17463e198f78SNeilBrown if (disk < 0) 17473e198f78SNeilBrown disk = wonly; 17483e198f78SNeilBrown r1_bio->read_disk = disk; 1749191ea9b2SNeilBrown 
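/* At this point every usable mirror has a bio prepared: in-sync devices
 * are set up for READ (candidate data sources), not-in-sync devices for
 * WRITE (recovery targets), and read_disk points at the preferred source,
 * falling back to a write-mostly device only if nothing else is readable.
 */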
17503e198f78SNeilBrown if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 17513e198f78SNeilBrown /* extra read targets are also write targets */ 17523e198f78SNeilBrown write_targets += read_targets-1; 17533e198f78SNeilBrown 17543e198f78SNeilBrown if (write_targets == 0 || read_targets == 0) { 17551da177e4SLinus Torvalds /* There is nowhere to write, so all non-sync 17561da177e4SLinus Torvalds * drives must be failed - so we are finished 17571da177e4SLinus Torvalds */ 175857afd89fSNeilBrown sector_t rv = max_sector - sector_nr; 175957afd89fSNeilBrown *skipped = 1; 17601da177e4SLinus Torvalds put_buf(r1_bio); 17611da177e4SLinus Torvalds return rv; 17621da177e4SLinus Torvalds } 17631da177e4SLinus Torvalds 1764c6207277SNeilBrown if (max_sector > mddev->resync_max) 1765c6207277SNeilBrown max_sector = mddev->resync_max; /* Don't do IO beyond here */ 17661da177e4SLinus Torvalds nr_sectors = 0; 1767289e99e8SNeilBrown sync_blocks = 0; 17681da177e4SLinus Torvalds do { 17691da177e4SLinus Torvalds struct page *page; 17701da177e4SLinus Torvalds int len = PAGE_SIZE; 17711da177e4SLinus Torvalds if (sector_nr + (len>>9) > max_sector) 17721da177e4SLinus Torvalds len = (max_sector - sector_nr) << 9; 17731da177e4SLinus Torvalds if (len == 0) 17741da177e4SLinus Torvalds break; 1775ab7a30c7SNeilBrown if (sync_blocks == 0) { 17766a806c51SNeilBrown if (!bitmap_start_sync(mddev->bitmap, sector_nr, 1777e3b9703eSNeilBrown &sync_blocks, still_degraded) && 1778e5de485fSNeilBrown !conf->fullsync && 1779e5de485fSNeilBrown !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 1780191ea9b2SNeilBrown break; 17819e77c485SEric Sesterhenn BUG_ON(sync_blocks < (PAGE_SIZE>>9)); 17827571ae88SNeilBrown if ((len >> 9) > sync_blocks) 17836a806c51SNeilBrown len = sync_blocks<<9; 1784ab7a30c7SNeilBrown } 1785191ea9b2SNeilBrown 17861da177e4SLinus Torvalds for (i=0 ; i < conf->raid_disks; i++) { 17871da177e4SLinus Torvalds bio = r1_bio->bios[i]; 17881da177e4SLinus Torvalds if (bio->bi_end_io) { 1789d11c171eSNeilBrown page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 17901da177e4SLinus Torvalds if (bio_add_page(bio, page, len, 0) == 0) { 17911da177e4SLinus Torvalds /* stop here */ 1792d11c171eSNeilBrown bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 17931da177e4SLinus Torvalds while (i > 0) { 17941da177e4SLinus Torvalds i--; 17951da177e4SLinus Torvalds bio = r1_bio->bios[i]; 17966a806c51SNeilBrown if (bio->bi_end_io==NULL) 17976a806c51SNeilBrown continue; 17981da177e4SLinus Torvalds /* remove last page from this bio */ 17991da177e4SLinus Torvalds bio->bi_vcnt--; 18001da177e4SLinus Torvalds bio->bi_size -= len; 18011da177e4SLinus Torvalds bio->bi_flags &= ~(1<< BIO_SEG_VALID); 18021da177e4SLinus Torvalds } 18031da177e4SLinus Torvalds goto bio_full; 18041da177e4SLinus Torvalds } 18051da177e4SLinus Torvalds } 18061da177e4SLinus Torvalds } 18071da177e4SLinus Torvalds nr_sectors += len>>9; 18081da177e4SLinus Torvalds sector_nr += len>>9; 1809191ea9b2SNeilBrown sync_blocks -= (len>>9); 18101da177e4SLinus Torvalds } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); 18111da177e4SLinus Torvalds bio_full: 18121da177e4SLinus Torvalds r1_bio->sectors = nr_sectors; 18131da177e4SLinus Torvalds 1814d11c171eSNeilBrown /* For a user-requested sync, we read all readable devices and do a 1815d11c171eSNeilBrown * compare 1816d11c171eSNeilBrown */ 1817d11c171eSNeilBrown if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1818d11c171eSNeilBrown atomic_set(&r1_bio->remaining, read_targets); 1819d11c171eSNeilBrown for (i=0; 
i<conf->raid_disks; i++) { 1820d11c171eSNeilBrown bio = r1_bio->bios[i]; 1821d11c171eSNeilBrown if (bio->bi_end_io == end_sync_read) { 1822ddac7c7eSNeilBrown md_sync_acct(bio->bi_bdev, nr_sectors); 18231da177e4SLinus Torvalds generic_make_request(bio); 1824d11c171eSNeilBrown } 1825d11c171eSNeilBrown } 1826d11c171eSNeilBrown } else { 1827d11c171eSNeilBrown atomic_set(&r1_bio->remaining, 1); 1828d11c171eSNeilBrown bio = r1_bio->bios[r1_bio->read_disk]; 1829ddac7c7eSNeilBrown md_sync_acct(bio->bi_bdev, nr_sectors); 1830d11c171eSNeilBrown generic_make_request(bio); 1831d11c171eSNeilBrown 1832d11c171eSNeilBrown } 18331da177e4SLinus Torvalds return nr_sectors; 18341da177e4SLinus Torvalds } 18351da177e4SLinus Torvalds 183680c3a6ceSDan Williams static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks) 183780c3a6ceSDan Williams { 183880c3a6ceSDan Williams if (sectors) 183980c3a6ceSDan Williams return sectors; 184080c3a6ceSDan Williams 184180c3a6ceSDan Williams return mddev->dev_sectors; 184280c3a6ceSDan Williams } 184380c3a6ceSDan Williams 1844709ae487SNeilBrown static conf_t *setup_conf(mddev_t *mddev) 18451da177e4SLinus Torvalds { 18461da177e4SLinus Torvalds conf_t *conf; 1847709ae487SNeilBrown int i; 18481da177e4SLinus Torvalds mirror_info_t *disk; 18491da177e4SLinus Torvalds mdk_rdev_t *rdev; 1850709ae487SNeilBrown int err = -ENOMEM; 18511da177e4SLinus Torvalds 18529ffae0cfSNeilBrown conf = kzalloc(sizeof(conf_t), GFP_KERNEL); 18531da177e4SLinus Torvalds if (!conf) 1854709ae487SNeilBrown goto abort; 18551da177e4SLinus Torvalds 18569ffae0cfSNeilBrown conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, 18571da177e4SLinus Torvalds GFP_KERNEL); 18581da177e4SLinus Torvalds if (!conf->mirrors) 1859709ae487SNeilBrown goto abort; 18601da177e4SLinus Torvalds 1861ddaf22abSNeilBrown conf->tmppage = alloc_page(GFP_KERNEL); 1862ddaf22abSNeilBrown if (!conf->tmppage) 1863709ae487SNeilBrown goto abort; 1864ddaf22abSNeilBrown 1865709ae487SNeilBrown conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 18661da177e4SLinus Torvalds if (!conf->poolinfo) 1867709ae487SNeilBrown goto abort; 18681da177e4SLinus Torvalds conf->poolinfo->raid_disks = mddev->raid_disks; 18691da177e4SLinus Torvalds conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 18701da177e4SLinus Torvalds r1bio_pool_free, 18711da177e4SLinus Torvalds conf->poolinfo); 18721da177e4SLinus Torvalds if (!conf->r1bio_pool) 1873709ae487SNeilBrown goto abort; 1874709ae487SNeilBrown 1875ed9bfdf1SNeilBrown conf->poolinfo->mddev = mddev; 18761da177e4SLinus Torvalds 1877e7e72bf6SNeil Brown spin_lock_init(&conf->device_lock); 1878159ec1fcSCheng Renquan list_for_each_entry(rdev, &mddev->disks, same_set) { 1879709ae487SNeilBrown int disk_idx = rdev->raid_disk; 18801da177e4SLinus Torvalds if (disk_idx >= mddev->raid_disks 18811da177e4SLinus Torvalds || disk_idx < 0) 18821da177e4SLinus Torvalds continue; 18831da177e4SLinus Torvalds disk = conf->mirrors + disk_idx; 18841da177e4SLinus Torvalds 18851da177e4SLinus Torvalds disk->rdev = rdev; 18861da177e4SLinus Torvalds 18871da177e4SLinus Torvalds disk->head_position = 0; 18881da177e4SLinus Torvalds } 18891da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks; 18901da177e4SLinus Torvalds conf->mddev = mddev; 18911da177e4SLinus Torvalds INIT_LIST_HEAD(&conf->retry_list); 18921da177e4SLinus Torvalds 18931da177e4SLinus Torvalds spin_lock_init(&conf->resync_lock); 189417999be4SNeilBrown init_waitqueue_head(&conf->wait_barrier); 18951da177e4SLinus Torvalds 
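/* pending_bio_list collects writes queued during normal I/O until raid1d
 * submits them via flush_pending_writes(); last_used, set up below, seeds
 * read_balance() with the first in-sync mirror found.
 */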
1896191ea9b2SNeilBrown bio_list_init(&conf->pending_bio_list); 1897191ea9b2SNeilBrown 1898709ae487SNeilBrown conf->last_used = -1; 18991da177e4SLinus Torvalds for (i = 0; i < conf->raid_disks; i++) { 19001da177e4SLinus Torvalds 19011da177e4SLinus Torvalds disk = conf->mirrors + i; 19021da177e4SLinus Torvalds 19035fd6c1dcSNeilBrown if (!disk->rdev || 19045fd6c1dcSNeilBrown !test_bit(In_sync, &disk->rdev->flags)) { 19051da177e4SLinus Torvalds disk->head_position = 0; 1906918f0238SNeilBrown if (disk->rdev) 190717571284SNeilBrown conf->fullsync = 1; 1908709ae487SNeilBrown } else if (conf->last_used < 0) 1909709ae487SNeilBrown /* 1910709ae487SNeilBrown * The first working device is used as a 1911709ae487SNeilBrown * starting point to read balancing. 1912709ae487SNeilBrown */ 1913709ae487SNeilBrown conf->last_used = i; 19141da177e4SLinus Torvalds } 1915709ae487SNeilBrown 1916709ae487SNeilBrown err = -EIO; 1917709ae487SNeilBrown if (conf->last_used < 0) { 19189dd1e2faSNeilBrown printk(KERN_ERR "md/raid1:%s: no operational mirrors\n", 191911ce99e6SNeilBrown mdname(mddev)); 1920709ae487SNeilBrown goto abort; 192111ce99e6SNeilBrown } 1922709ae487SNeilBrown err = -ENOMEM; 1923709ae487SNeilBrown conf->thread = md_register_thread(raid1d, mddev, NULL); 1924709ae487SNeilBrown if (!conf->thread) { 19251da177e4SLinus Torvalds printk(KERN_ERR 19269dd1e2faSNeilBrown "md/raid1:%s: couldn't allocate thread\n", 19271da177e4SLinus Torvalds mdname(mddev)); 1928709ae487SNeilBrown goto abort; 19291da177e4SLinus Torvalds } 1930191ea9b2SNeilBrown 1931709ae487SNeilBrown return conf; 1932709ae487SNeilBrown 1933709ae487SNeilBrown abort: 1934709ae487SNeilBrown if (conf) { 1935709ae487SNeilBrown if (conf->r1bio_pool) 1936709ae487SNeilBrown mempool_destroy(conf->r1bio_pool); 1937709ae487SNeilBrown kfree(conf->mirrors); 1938709ae487SNeilBrown safe_put_page(conf->tmppage); 1939709ae487SNeilBrown kfree(conf->poolinfo); 1940709ae487SNeilBrown kfree(conf); 1941709ae487SNeilBrown } 1942709ae487SNeilBrown return ERR_PTR(err); 1943709ae487SNeilBrown } 1944709ae487SNeilBrown 1945709ae487SNeilBrown static int run(mddev_t *mddev) 1946709ae487SNeilBrown { 1947709ae487SNeilBrown conf_t *conf; 1948709ae487SNeilBrown int i; 1949709ae487SNeilBrown mdk_rdev_t *rdev; 1950709ae487SNeilBrown 1951709ae487SNeilBrown if (mddev->level != 1) { 19529dd1e2faSNeilBrown printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", 1953709ae487SNeilBrown mdname(mddev), mddev->level); 1954709ae487SNeilBrown return -EIO; 1955709ae487SNeilBrown } 1956709ae487SNeilBrown if (mddev->reshape_position != MaxSector) { 19579dd1e2faSNeilBrown printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n", 1958709ae487SNeilBrown mdname(mddev)); 1959709ae487SNeilBrown return -EIO; 1960709ae487SNeilBrown } 1961709ae487SNeilBrown /* 1962709ae487SNeilBrown * copy the already verified devices into our private RAID1 1963709ae487SNeilBrown * bookkeeping area. 
[whatever we allocate in run(), 1964709ae487SNeilBrown * should be freed in stop()] 1965709ae487SNeilBrown */ 1966709ae487SNeilBrown if (mddev->private == NULL) 1967709ae487SNeilBrown conf = setup_conf(mddev); 1968709ae487SNeilBrown else 1969709ae487SNeilBrown conf = mddev->private; 1970709ae487SNeilBrown 1971709ae487SNeilBrown if (IS_ERR(conf)) 1972709ae487SNeilBrown return PTR_ERR(conf); 1973709ae487SNeilBrown 1974709ae487SNeilBrown list_for_each_entry(rdev, &mddev->disks, same_set) { 1975709ae487SNeilBrown disk_stack_limits(mddev->gendisk, rdev->bdev, 1976709ae487SNeilBrown rdev->data_offset << 9); 1977709ae487SNeilBrown /* as we don't honour merge_bvec_fn, we must never risk 1978627a2d3cSNeilBrown * violating it, so limit ->max_segments to 1 lying within 1979627a2d3cSNeilBrown * a single page, as a one page request is never in violation. 1980709ae487SNeilBrown */ 1981627a2d3cSNeilBrown if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { 1982627a2d3cSNeilBrown blk_queue_max_segments(mddev->queue, 1); 1983627a2d3cSNeilBrown blk_queue_segment_boundary(mddev->queue, 1984627a2d3cSNeilBrown PAGE_CACHE_SIZE - 1); 1985627a2d3cSNeilBrown } 1986709ae487SNeilBrown } 1987709ae487SNeilBrown 1988709ae487SNeilBrown mddev->degraded = 0; 1989709ae487SNeilBrown for (i=0; i < conf->raid_disks; i++) 1990709ae487SNeilBrown if (conf->mirrors[i].rdev == NULL || 1991709ae487SNeilBrown !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || 1992709ae487SNeilBrown test_bit(Faulty, &conf->mirrors[i].rdev->flags)) 1993709ae487SNeilBrown mddev->degraded++; 1994709ae487SNeilBrown 1995709ae487SNeilBrown if (conf->raid_disks - mddev->degraded == 1) 1996709ae487SNeilBrown mddev->recovery_cp = MaxSector; 1997709ae487SNeilBrown 19988c6ac868SAndre Noll if (mddev->recovery_cp != MaxSector) 19999dd1e2faSNeilBrown printk(KERN_NOTICE "md/raid1:%s: not clean" 20008c6ac868SAndre Noll " -- starting background reconstruction\n", 20018c6ac868SAndre Noll mdname(mddev)); 20021da177e4SLinus Torvalds printk(KERN_INFO 20039dd1e2faSNeilBrown "md/raid1:%s: active with %d out of %d mirrors\n", 20041da177e4SLinus Torvalds mdname(mddev), mddev->raid_disks - mddev->degraded, 20051da177e4SLinus Torvalds mddev->raid_disks); 2006709ae487SNeilBrown 20071da177e4SLinus Torvalds /* 20081da177e4SLinus Torvalds * Ok, everything is just fine now 20091da177e4SLinus Torvalds */ 2010709ae487SNeilBrown mddev->thread = conf->thread; 2011709ae487SNeilBrown conf->thread = NULL; 2012709ae487SNeilBrown mddev->private = conf; 2013709ae487SNeilBrown 20141f403624SDan Williams md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 20151da177e4SLinus Torvalds 20160d129228SNeilBrown mddev->queue->backing_dev_info.congested_fn = raid1_congested; 20170d129228SNeilBrown mddev->queue->backing_dev_info.congested_data = mddev; 2018a91a2785SMartin K. Petersen return md_integrity_register(mddev); 20191da177e4SLinus Torvalds } 20201da177e4SLinus Torvalds 20211da177e4SLinus Torvalds static int stop(mddev_t *mddev) 20221da177e4SLinus Torvalds { 2023070ec55dSNeilBrown conf_t *conf = mddev->private; 20244b6d287fSNeilBrown struct bitmap *bitmap = mddev->bitmap; 20254b6d287fSNeilBrown 20264b6d287fSNeilBrown /* wait for behind writes to complete */ 2027e555190dSNeilBrown if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { 20289dd1e2faSNeilBrown printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n", 20299dd1e2faSNeilBrown mdname(mddev)); 20304b6d287fSNeilBrown /* need to kick something here to make sure I/O goes? 
*/ 2031e555190dSNeilBrown wait_event(bitmap->behind_wait, 2032e555190dSNeilBrown atomic_read(&bitmap->behind_writes) == 0); 20334b6d287fSNeilBrown } 20341da177e4SLinus Torvalds 2035409c57f3SNeilBrown raise_barrier(conf); 2036409c57f3SNeilBrown lower_barrier(conf); 2037409c57f3SNeilBrown 20381da177e4SLinus Torvalds md_unregister_thread(mddev->thread); 20391da177e4SLinus Torvalds mddev->thread = NULL; 20401da177e4SLinus Torvalds if (conf->r1bio_pool) 20411da177e4SLinus Torvalds mempool_destroy(conf->r1bio_pool); 20421da177e4SLinus Torvalds kfree(conf->mirrors); 20431da177e4SLinus Torvalds kfree(conf->poolinfo); 20441da177e4SLinus Torvalds kfree(conf); 20451da177e4SLinus Torvalds mddev->private = NULL; 20461da177e4SLinus Torvalds return 0; 20471da177e4SLinus Torvalds } 20481da177e4SLinus Torvalds 20491da177e4SLinus Torvalds static int raid1_resize(mddev_t *mddev, sector_t sectors) 20501da177e4SLinus Torvalds { 20511da177e4SLinus Torvalds /* no resync is happening, and there is enough space 20521da177e4SLinus Torvalds * on all devices, so we can resize. 20531da177e4SLinus Torvalds * We need to make sure resync covers any new space. 20541da177e4SLinus Torvalds * If the array is shrinking we should possibly wait until 20551da177e4SLinus Torvalds * any io in the removed space completes, but it hardly seems 20561da177e4SLinus Torvalds * worth it. 20571da177e4SLinus Torvalds */ 20581f403624SDan Williams md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0)); 2059b522adcdSDan Williams if (mddev->array_sectors > raid1_size(mddev, sectors, 0)) 2060b522adcdSDan Williams return -EINVAL; 2061f233ea5cSAndre Noll set_capacity(mddev->gendisk, mddev->array_sectors); 2062449aad3eSNeilBrown revalidate_disk(mddev->gendisk); 2063b522adcdSDan Williams if (sectors > mddev->dev_sectors && 2064f233ea5cSAndre Noll mddev->recovery_cp == MaxSector) { 206558c0fed4SAndre Noll mddev->recovery_cp = mddev->dev_sectors; 20661da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 20671da177e4SLinus Torvalds } 2068b522adcdSDan Williams mddev->dev_sectors = sectors; 20694b5c7ae8SNeilBrown mddev->resync_max_sectors = sectors; 20701da177e4SLinus Torvalds return 0; 20711da177e4SLinus Torvalds } 20721da177e4SLinus Torvalds 207363c70c4fSNeilBrown static int raid1_reshape(mddev_t *mddev) 20741da177e4SLinus Torvalds { 20751da177e4SLinus Torvalds /* We need to: 20761da177e4SLinus Torvalds * 1/ resize the r1bio_pool 20771da177e4SLinus Torvalds * 2/ resize conf->mirrors 20781da177e4SLinus Torvalds * 20791da177e4SLinus Torvalds * We allocate a new r1bio_pool if we can. 20801da177e4SLinus Torvalds * Then raise a device barrier and wait until all IO stops. 20811da177e4SLinus Torvalds * Then resize conf->mirrors and swap in the new r1bio pool. 20826ea9c07cSNeilBrown * 20836ea9c07cSNeilBrown * At the same time, we "pack" the devices so that all the missing 20846ea9c07cSNeilBrown * devices have the higher raid_disk numbers. 
20851da177e4SLinus Torvalds */ 20861da177e4SLinus Torvalds mempool_t *newpool, *oldpool; 20871da177e4SLinus Torvalds struct pool_info *newpoolinfo; 20881da177e4SLinus Torvalds mirror_info_t *newmirrors; 2089070ec55dSNeilBrown conf_t *conf = mddev->private; 209063c70c4fSNeilBrown int cnt, raid_disks; 2091c04be0aaSNeilBrown unsigned long flags; 2092b5470dc5SDan Williams int d, d2, err; 20931da177e4SLinus Torvalds 209463c70c4fSNeilBrown /* Cannot change chunk_size, layout, or level */ 2095664e7c41SAndre Noll if (mddev->chunk_sectors != mddev->new_chunk_sectors || 209663c70c4fSNeilBrown mddev->layout != mddev->new_layout || 209763c70c4fSNeilBrown mddev->level != mddev->new_level) { 2098664e7c41SAndre Noll mddev->new_chunk_sectors = mddev->chunk_sectors; 209963c70c4fSNeilBrown mddev->new_layout = mddev->layout; 210063c70c4fSNeilBrown mddev->new_level = mddev->level; 210163c70c4fSNeilBrown return -EINVAL; 210263c70c4fSNeilBrown } 210363c70c4fSNeilBrown 2104b5470dc5SDan Williams err = md_allow_write(mddev); 2105b5470dc5SDan Williams if (err) 2106b5470dc5SDan Williams return err; 21072a2275d6SNeilBrown 210863c70c4fSNeilBrown raid_disks = mddev->raid_disks + mddev->delta_disks; 210963c70c4fSNeilBrown 21106ea9c07cSNeilBrown if (raid_disks < conf->raid_disks) { 21116ea9c07cSNeilBrown cnt=0; 21126ea9c07cSNeilBrown for (d= 0; d < conf->raid_disks; d++) 21131da177e4SLinus Torvalds if (conf->mirrors[d].rdev) 21146ea9c07cSNeilBrown cnt++; 21156ea9c07cSNeilBrown if (cnt > raid_disks) 21161da177e4SLinus Torvalds return -EBUSY; 21176ea9c07cSNeilBrown } 21181da177e4SLinus Torvalds 21191da177e4SLinus Torvalds newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 21201da177e4SLinus Torvalds if (!newpoolinfo) 21211da177e4SLinus Torvalds return -ENOMEM; 21221da177e4SLinus Torvalds newpoolinfo->mddev = mddev; 21231da177e4SLinus Torvalds newpoolinfo->raid_disks = raid_disks; 21241da177e4SLinus Torvalds 21251da177e4SLinus Torvalds newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 21261da177e4SLinus Torvalds r1bio_pool_free, newpoolinfo); 21271da177e4SLinus Torvalds if (!newpool) { 21281da177e4SLinus Torvalds kfree(newpoolinfo); 21291da177e4SLinus Torvalds return -ENOMEM; 21301da177e4SLinus Torvalds } 21319ffae0cfSNeilBrown newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); 21321da177e4SLinus Torvalds if (!newmirrors) { 21331da177e4SLinus Torvalds kfree(newpoolinfo); 21341da177e4SLinus Torvalds mempool_destroy(newpool); 21351da177e4SLinus Torvalds return -ENOMEM; 21361da177e4SLinus Torvalds } 21371da177e4SLinus Torvalds 213817999be4SNeilBrown raise_barrier(conf); 21391da177e4SLinus Torvalds 21401da177e4SLinus Torvalds /* ok, everything is stopped */ 21411da177e4SLinus Torvalds oldpool = conf->r1bio_pool; 21421da177e4SLinus Torvalds conf->r1bio_pool = newpool; 21436ea9c07cSNeilBrown 2144a88aa786SNeilBrown for (d = d2 = 0; d < conf->raid_disks; d++) { 2145a88aa786SNeilBrown mdk_rdev_t *rdev = conf->mirrors[d].rdev; 2146a88aa786SNeilBrown if (rdev && rdev->raid_disk != d2) { 2147a88aa786SNeilBrown char nm[20]; 2148a88aa786SNeilBrown sprintf(nm, "rd%d", rdev->raid_disk); 2149a88aa786SNeilBrown sysfs_remove_link(&mddev->kobj, nm); 2150a88aa786SNeilBrown rdev->raid_disk = d2; 2151a88aa786SNeilBrown sprintf(nm, "rd%d", rdev->raid_disk); 2152a88aa786SNeilBrown sysfs_remove_link(&mddev->kobj, nm); 2153a88aa786SNeilBrown if (sysfs_create_link(&mddev->kobj, 2154a88aa786SNeilBrown &rdev->kobj, nm)) 2155a88aa786SNeilBrown printk(KERN_WARNING 21569dd1e2faSNeilBrown "md/raid1:%s: cannot register " 
21579dd1e2faSNeilBrown "%s\n", 21589dd1e2faSNeilBrown mdname(mddev), nm); 2159a88aa786SNeilBrown } 2160a88aa786SNeilBrown if (rdev) 2161a88aa786SNeilBrown newmirrors[d2++].rdev = rdev; 21626ea9c07cSNeilBrown } 21631da177e4SLinus Torvalds kfree(conf->mirrors); 21641da177e4SLinus Torvalds conf->mirrors = newmirrors; 21651da177e4SLinus Torvalds kfree(conf->poolinfo); 21661da177e4SLinus Torvalds conf->poolinfo = newpoolinfo; 21671da177e4SLinus Torvalds 2168c04be0aaSNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 21691da177e4SLinus Torvalds mddev->degraded += (raid_disks - conf->raid_disks); 2170c04be0aaSNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 21711da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks = raid_disks; 217263c70c4fSNeilBrown mddev->delta_disks = 0; 21731da177e4SLinus Torvalds 21746ea9c07cSNeilBrown conf->last_used = 0; /* just make sure it is in-range */ 217517999be4SNeilBrown lower_barrier(conf); 21761da177e4SLinus Torvalds 21771da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 21781da177e4SLinus Torvalds md_wakeup_thread(mddev->thread); 21791da177e4SLinus Torvalds 21801da177e4SLinus Torvalds mempool_destroy(oldpool); 21811da177e4SLinus Torvalds return 0; 21821da177e4SLinus Torvalds } 21831da177e4SLinus Torvalds 2184500af87aSNeilBrown static void raid1_quiesce(mddev_t *mddev, int state) 218536fa3063SNeilBrown { 2186070ec55dSNeilBrown conf_t *conf = mddev->private; 218736fa3063SNeilBrown 218836fa3063SNeilBrown switch(state) { 21896eef4b21SNeilBrown case 2: /* wake for suspend */ 21906eef4b21SNeilBrown wake_up(&conf->wait_barrier); 21916eef4b21SNeilBrown break; 21929e6603daSNeilBrown case 1: 219317999be4SNeilBrown raise_barrier(conf); 219436fa3063SNeilBrown break; 21959e6603daSNeilBrown case 0: 219617999be4SNeilBrown lower_barrier(conf); 219736fa3063SNeilBrown break; 219836fa3063SNeilBrown } 219936fa3063SNeilBrown } 220036fa3063SNeilBrown 2201709ae487SNeilBrown static void *raid1_takeover(mddev_t *mddev) 2202709ae487SNeilBrown { 2203709ae487SNeilBrown /* raid1 can take over: 2204709ae487SNeilBrown * raid5 with 2 devices, any layout or chunk size 2205709ae487SNeilBrown */ 2206709ae487SNeilBrown if (mddev->level == 5 && mddev->raid_disks == 2) { 2207709ae487SNeilBrown conf_t *conf; 2208709ae487SNeilBrown mddev->new_level = 1; 2209709ae487SNeilBrown mddev->new_layout = 0; 2210709ae487SNeilBrown mddev->new_chunk_sectors = 0; 2211709ae487SNeilBrown conf = setup_conf(mddev); 2212709ae487SNeilBrown if (!IS_ERR(conf)) 2213709ae487SNeilBrown conf->barrier = 1; 2214709ae487SNeilBrown return conf; 2215709ae487SNeilBrown } 2216709ae487SNeilBrown return ERR_PTR(-EINVAL); 2217709ae487SNeilBrown } 22181da177e4SLinus Torvalds 22192604b703SNeilBrown static struct mdk_personality raid1_personality = 22201da177e4SLinus Torvalds { 22211da177e4SLinus Torvalds .name = "raid1", 22222604b703SNeilBrown .level = 1, 22231da177e4SLinus Torvalds .owner = THIS_MODULE, 22241da177e4SLinus Torvalds .make_request = make_request, 22251da177e4SLinus Torvalds .run = run, 22261da177e4SLinus Torvalds .stop = stop, 22271da177e4SLinus Torvalds .status = status, 22281da177e4SLinus Torvalds .error_handler = error, 22291da177e4SLinus Torvalds .hot_add_disk = raid1_add_disk, 22301da177e4SLinus Torvalds .hot_remove_disk= raid1_remove_disk, 22311da177e4SLinus Torvalds .spare_active = raid1_spare_active, 22321da177e4SLinus Torvalds .sync_request = sync_request, 22331da177e4SLinus Torvalds .resize = raid1_resize, 223480c3a6ceSDan Williams .size = raid1_size, 
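/* raid1 wires raid1_reshape to .check_reshape only: it both validates and
 * applies a change in the number of mirrors, so the personality does not
 * register a separate reshape entry point.
 */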
223563c70c4fSNeilBrown .check_reshape = raid1_reshape, 223636fa3063SNeilBrown .quiesce = raid1_quiesce, 2237709ae487SNeilBrown .takeover = raid1_takeover, 22381da177e4SLinus Torvalds }; 22391da177e4SLinus Torvalds 22401da177e4SLinus Torvalds static int __init raid_init(void) 22411da177e4SLinus Torvalds { 22422604b703SNeilBrown return register_md_personality(&raid1_personality); 22431da177e4SLinus Torvalds } 22441da177e4SLinus Torvalds 22451da177e4SLinus Torvalds static void raid_exit(void) 22461da177e4SLinus Torvalds { 22472604b703SNeilBrown unregister_md_personality(&raid1_personality); 22481da177e4SLinus Torvalds } 22491da177e4SLinus Torvalds 22501da177e4SLinus Torvalds module_init(raid_init); 22511da177e4SLinus Torvalds module_exit(raid_exit); 22521da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 22530efb9e61SNeilBrown MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD"); 22541da177e4SLinus Torvalds MODULE_ALIAS("md-personality-3"); /* RAID1 */ 2255d9d166c2SNeilBrown MODULE_ALIAS("md-raid1"); 22562604b703SNeilBrown MODULE_ALIAS("md-level-1"); 2257
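/*
 * Usage note (illustrative only, not part of the driver): a two-device
 * mirror handled by this personality is typically created from user space
 * with mdadm, e.g.
 *
 *   mdadm --create /dev/md0 --level=1 --raid-devices=2 /dev/sda1 /dev/sdb1
 *
 * and the check/repair resync paths above (MD_RECOVERY_REQUESTED with or
 * without MD_RECOVERY_CHECK) are driven through sysfs:
 *
 *   echo check  > /sys/block/md0/md/sync_action
 *   echo repair > /sys/block/md0/md/sync_action
 *
 * Device names here are placeholders.
 */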