1 /* 2 md.c : Multiple Devices driver for Linux 3 Copyright (C) 1998, 1999, 2000 Ingo Molnar 4 5 completely rewritten, based on the MD driver code from Marc Zyngier 6 7 Changes: 8 9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar 10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com> 11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net> 12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su> 13 - kmod support by: Cyrus Durgin 14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com> 15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au> 16 17 - lots of fixes and improvements to the RAID1/RAID5 and generic 18 RAID code (such as request based resynchronization): 19 20 Neil Brown <neilb@cse.unsw.edu.au>. 21 22 - persistent bitmap code 23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. 24 25 This program is free software; you can redistribute it and/or modify 26 it under the terms of the GNU General Public License as published by 27 the Free Software Foundation; either version 2, or (at your option) 28 any later version. 29 30 You should have received a copy of the GNU General Public License 31 (for example /usr/src/linux/COPYING); if not, write to the Free 32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 33 */ 34 35 #include <linux/module.h> 36 #include <linux/config.h> 37 #include <linux/kthread.h> 38 #include <linux/linkage.h> 39 #include <linux/raid/md.h> 40 #include <linux/raid/bitmap.h> 41 #include <linux/sysctl.h> 42 #include <linux/devfs_fs_kernel.h> 43 #include <linux/buffer_head.h> /* for invalidate_bdev */ 44 #include <linux/suspend.h> 45 #include <linux/poll.h> 46 47 #include <linux/init.h> 48 49 #include <linux/file.h> 50 51 #ifdef CONFIG_KMOD 52 #include <linux/kmod.h> 53 #endif 54 55 #include <asm/unaligned.h> 56 57 #define MAJOR_NR MD_MAJOR 58 #define MD_DRIVER 59 60 /* 63 partitions with the alternate major number (mdp) */ 61 #define MdpMinorShift 6 62 63 #define DEBUG 0 64 #define dprintk(x...) ((void)(DEBUG && printk(x))) 65 66 67 #ifndef MODULE 68 static void autostart_arrays (int part); 69 #endif 70 71 static LIST_HEAD(pers_list); 72 static DEFINE_SPINLOCK(pers_lock); 73 74 /* 75 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 76 * is 1000 KB/sec, so the extra system load does not show up that much. 77 * Increase it if you want to have more _guaranteed_ speed. Note that 78 * the RAID driver will use the maximum available bandwidth if the IO 79 * subsystem is idle. There is also an 'absolute maximum' reconstruction 80 * speed limit - in case reconstruction slows down your system despite 81 * idle IO detection. 82 * 83 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 84 * or /sys/block/mdX/md/sync_speed_{min,max} 85 */ 86 87 static int sysctl_speed_limit_min = 1000; 88 static int sysctl_speed_limit_max = 200000; 89 static inline int speed_min(mddev_t *mddev) 90 { 91 return mddev->sync_speed_min ? 92 mddev->sync_speed_min : sysctl_speed_limit_min; 93 } 94 95 static inline int speed_max(mddev_t *mddev) 96 { 97 return mddev->sync_speed_max ? 
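		/* a non-zero per-array setting overrides the sysctl default;
		 * e.g. "echo 100000 > /sys/block/md0/md/sync_speed_max" sets
		 * a local limit and writing "system" clears it again
		 * (see sync_max_store() below) */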
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
static void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Allows iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still holding
 * a reference to the current mddev must call mddev_put() on it.
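 *
 * A typical (purely illustrative) use:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *	ITERATE_MDDEV(mddev,tmp)
 *		printk("%s\n", mdname(mddev));
 *
 * (md_print_devices() below uses this pattern; the macro takes and
 * drops all_mddevs_lock around each step and holds a reference on
 * the current mddev while the loop body runs unlocked.)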
181 */ 182 #define ITERATE_MDDEV(mddev,tmp) \ 183 \ 184 for (({ spin_lock(&all_mddevs_lock); \ 185 tmp = all_mddevs.next; \ 186 mddev = NULL;}); \ 187 ({ if (tmp != &all_mddevs) \ 188 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\ 189 spin_unlock(&all_mddevs_lock); \ 190 if (mddev) mddev_put(mddev); \ 191 mddev = list_entry(tmp, mddev_t, all_mddevs); \ 192 tmp != &all_mddevs;}); \ 193 ({ spin_lock(&all_mddevs_lock); \ 194 tmp = tmp->next;}) \ 195 ) 196 197 198 static int md_fail_request (request_queue_t *q, struct bio *bio) 199 { 200 bio_io_error(bio, bio->bi_size); 201 return 0; 202 } 203 204 static inline mddev_t *mddev_get(mddev_t *mddev) 205 { 206 atomic_inc(&mddev->active); 207 return mddev; 208 } 209 210 static void mddev_put(mddev_t *mddev) 211 { 212 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 213 return; 214 if (!mddev->raid_disks && list_empty(&mddev->disks)) { 215 list_del(&mddev->all_mddevs); 216 blk_put_queue(mddev->queue); 217 kobject_unregister(&mddev->kobj); 218 } 219 spin_unlock(&all_mddevs_lock); 220 } 221 222 static mddev_t * mddev_find(dev_t unit) 223 { 224 mddev_t *mddev, *new = NULL; 225 226 retry: 227 spin_lock(&all_mddevs_lock); 228 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 229 if (mddev->unit == unit) { 230 mddev_get(mddev); 231 spin_unlock(&all_mddevs_lock); 232 kfree(new); 233 return mddev; 234 } 235 236 if (new) { 237 list_add(&new->all_mddevs, &all_mddevs); 238 spin_unlock(&all_mddevs_lock); 239 return new; 240 } 241 spin_unlock(&all_mddevs_lock); 242 243 new = kzalloc(sizeof(*new), GFP_KERNEL); 244 if (!new) 245 return NULL; 246 247 new->unit = unit; 248 if (MAJOR(unit) == MD_MAJOR) 249 new->md_minor = MINOR(unit); 250 else 251 new->md_minor = MINOR(unit) >> MdpMinorShift; 252 253 init_MUTEX(&new->reconfig_sem); 254 INIT_LIST_HEAD(&new->disks); 255 INIT_LIST_HEAD(&new->all_mddevs); 256 init_timer(&new->safemode_timer); 257 atomic_set(&new->active, 1); 258 spin_lock_init(&new->write_lock); 259 init_waitqueue_head(&new->sb_wait); 260 261 new->queue = blk_alloc_queue(GFP_KERNEL); 262 if (!new->queue) { 263 kfree(new); 264 return NULL; 265 } 266 267 blk_queue_make_request(new->queue, md_fail_request); 268 269 goto retry; 270 } 271 272 static inline int mddev_lock(mddev_t * mddev) 273 { 274 return down_interruptible(&mddev->reconfig_sem); 275 } 276 277 static inline void mddev_lock_uninterruptible(mddev_t * mddev) 278 { 279 down(&mddev->reconfig_sem); 280 } 281 282 static inline int mddev_trylock(mddev_t * mddev) 283 { 284 return down_trylock(&mddev->reconfig_sem); 285 } 286 287 static inline void mddev_unlock(mddev_t * mddev) 288 { 289 up(&mddev->reconfig_sem); 290 291 md_wakeup_thread(mddev->thread); 292 } 293 294 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 295 { 296 mdk_rdev_t * rdev; 297 struct list_head *tmp; 298 299 ITERATE_RDEV(mddev,rdev,tmp) { 300 if (rdev->desc_nr == nr) 301 return rdev; 302 } 303 return NULL; 304 } 305 306 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) 307 { 308 struct list_head *tmp; 309 mdk_rdev_t *rdev; 310 311 ITERATE_RDEV(mddev,rdev,tmp) { 312 if (rdev->bdev->bd_dev == dev) 313 return rdev; 314 } 315 return NULL; 316 } 317 318 static struct mdk_personality *find_pers(int level, char *clevel) 319 { 320 struct mdk_personality *pers; 321 list_for_each_entry(pers, &pers_list, list) { 322 if (level != LEVEL_NONE && pers->level == level) 323 return pers; 324 if (strcmp(pers->name, clevel)==0) 325 return pers; 326 } 327 return NULL; 328 } 329 330 static inline sector_t 
calc_dev_sboffset(struct block_device *bdev) 331 { 332 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 333 return MD_NEW_SIZE_BLOCKS(size); 334 } 335 336 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size) 337 { 338 sector_t size; 339 340 size = rdev->sb_offset; 341 342 if (chunk_size) 343 size &= ~((sector_t)chunk_size/1024 - 1); 344 return size; 345 } 346 347 static int alloc_disk_sb(mdk_rdev_t * rdev) 348 { 349 if (rdev->sb_page) 350 MD_BUG(); 351 352 rdev->sb_page = alloc_page(GFP_KERNEL); 353 if (!rdev->sb_page) { 354 printk(KERN_ALERT "md: out of memory.\n"); 355 return -EINVAL; 356 } 357 358 return 0; 359 } 360 361 static void free_disk_sb(mdk_rdev_t * rdev) 362 { 363 if (rdev->sb_page) { 364 put_page(rdev->sb_page); 365 rdev->sb_loaded = 0; 366 rdev->sb_page = NULL; 367 rdev->sb_offset = 0; 368 rdev->size = 0; 369 } 370 } 371 372 373 static int super_written(struct bio *bio, unsigned int bytes_done, int error) 374 { 375 mdk_rdev_t *rdev = bio->bi_private; 376 mddev_t *mddev = rdev->mddev; 377 if (bio->bi_size) 378 return 1; 379 380 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) 381 md_error(mddev, rdev); 382 383 if (atomic_dec_and_test(&mddev->pending_writes)) 384 wake_up(&mddev->sb_wait); 385 bio_put(bio); 386 return 0; 387 } 388 389 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 390 { 391 struct bio *bio2 = bio->bi_private; 392 mdk_rdev_t *rdev = bio2->bi_private; 393 mddev_t *mddev = rdev->mddev; 394 if (bio->bi_size) 395 return 1; 396 397 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 398 error == -EOPNOTSUPP) { 399 unsigned long flags; 400 /* barriers don't appear to be supported :-( */ 401 set_bit(BarriersNotsupp, &rdev->flags); 402 mddev->barriers_work = 0; 403 spin_lock_irqsave(&mddev->write_lock, flags); 404 bio2->bi_next = mddev->biolist; 405 mddev->biolist = bio2; 406 spin_unlock_irqrestore(&mddev->write_lock, flags); 407 wake_up(&mddev->sb_wait); 408 bio_put(bio); 409 return 0; 410 } 411 bio_put(bio2); 412 bio->bi_private = rdev; 413 return super_written(bio, bytes_done, error); 414 } 415 416 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 417 sector_t sector, int size, struct page *page) 418 { 419 /* write first size bytes of page to sector of rdev 420 * Increment mddev->pending_writes before returning 421 * and decrement it on completion, waking up sb_wait 422 * if zero is reached. 423 * If an error occurred, call md_error 424 * 425 * As we might need to resubmit the request if BIO_RW_BARRIER 426 * causes ENOTSUPP, we allocate a spare bio... 427 */ 428 struct bio *bio = bio_alloc(GFP_NOIO, 1); 429 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC); 430 431 bio->bi_bdev = rdev->bdev; 432 bio->bi_sector = sector; 433 bio_add_page(bio, page, size, 0); 434 bio->bi_private = rdev; 435 bio->bi_end_io = super_written; 436 bio->bi_rw = rw; 437 438 atomic_inc(&mddev->pending_writes); 439 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 440 struct bio *rbio; 441 rw |= (1<<BIO_RW_BARRIER); 442 rbio = bio_clone(bio, GFP_NOIO); 443 rbio->bi_private = bio; 444 rbio->bi_end_io = super_written_barrier; 445 submit_bio(rw, rbio); 446 } else 447 submit_bio(rw, bio); 448 } 449 450 void md_super_wait(mddev_t *mddev) 451 { 452 /* wait for all superblock writes that were scheduled to complete. 
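	 * (md_super_write() counts each write in mddev->pending_writes and
	 *  super_written() wakes sb_wait when that count drops to zero);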
453 * if any had to be retried (due to BARRIER problems), retry them 454 */ 455 DEFINE_WAIT(wq); 456 for(;;) { 457 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 458 if (atomic_read(&mddev->pending_writes)==0) 459 break; 460 while (mddev->biolist) { 461 struct bio *bio; 462 spin_lock_irq(&mddev->write_lock); 463 bio = mddev->biolist; 464 mddev->biolist = bio->bi_next ; 465 bio->bi_next = NULL; 466 spin_unlock_irq(&mddev->write_lock); 467 submit_bio(bio->bi_rw, bio); 468 } 469 schedule(); 470 } 471 finish_wait(&mddev->sb_wait, &wq); 472 } 473 474 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 475 { 476 if (bio->bi_size) 477 return 1; 478 479 complete((struct completion*)bio->bi_private); 480 return 0; 481 } 482 483 int sync_page_io(struct block_device *bdev, sector_t sector, int size, 484 struct page *page, int rw) 485 { 486 struct bio *bio = bio_alloc(GFP_NOIO, 1); 487 struct completion event; 488 int ret; 489 490 rw |= (1 << BIO_RW_SYNC); 491 492 bio->bi_bdev = bdev; 493 bio->bi_sector = sector; 494 bio_add_page(bio, page, size, 0); 495 init_completion(&event); 496 bio->bi_private = &event; 497 bio->bi_end_io = bi_complete; 498 submit_bio(rw, bio); 499 wait_for_completion(&event); 500 501 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 502 bio_put(bio); 503 return ret; 504 } 505 EXPORT_SYMBOL_GPL(sync_page_io); 506 507 static int read_disk_sb(mdk_rdev_t * rdev, int size) 508 { 509 char b[BDEVNAME_SIZE]; 510 if (!rdev->sb_page) { 511 MD_BUG(); 512 return -EINVAL; 513 } 514 if (rdev->sb_loaded) 515 return 0; 516 517 518 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ)) 519 goto fail; 520 rdev->sb_loaded = 1; 521 return 0; 522 523 fail: 524 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", 525 bdevname(rdev->bdev,b)); 526 return -EINVAL; 527 } 528 529 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 530 { 531 if ( (sb1->set_uuid0 == sb2->set_uuid0) && 532 (sb1->set_uuid1 == sb2->set_uuid1) && 533 (sb1->set_uuid2 == sb2->set_uuid2) && 534 (sb1->set_uuid3 == sb2->set_uuid3)) 535 536 return 1; 537 538 return 0; 539 } 540 541 542 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 543 { 544 int ret; 545 mdp_super_t *tmp1, *tmp2; 546 547 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 548 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 549 550 if (!tmp1 || !tmp2) { 551 ret = 0; 552 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n"); 553 goto abort; 554 } 555 556 *tmp1 = *sb1; 557 *tmp2 = *sb2; 558 559 /* 560 * nr_disks is not constant 561 */ 562 tmp1->nr_disks = 0; 563 tmp2->nr_disks = 0; 564 565 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4)) 566 ret = 0; 567 else 568 ret = 1; 569 570 abort: 571 kfree(tmp1); 572 kfree(tmp2); 573 return ret; 574 } 575 576 static unsigned int calc_sb_csum(mdp_super_t * sb) 577 { 578 unsigned int disk_csum, csum; 579 580 disk_csum = sb->sb_csum; 581 sb->sb_csum = 0; 582 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 583 sb->sb_csum = disk_csum; 584 return csum; 585 } 586 587 588 /* 589 * Handle superblock details. 590 * We want to be able to handle multiple superblock formats 591 * so we have a common interface to them all, and an array of 592 * different handlers. 593 * We rely on user-space to write the initial superblock, and support 594 * reading and updating of superblocks. 595 * Interface methods are: 596 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) 597 * loads and validates a superblock on dev. 
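 *      (dev->sb_page must already have been allocated with alloc_disk_sb());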
598 * if refdev != NULL, compare superblocks on both devices 599 * Return: 600 * 0 - dev has a superblock that is compatible with refdev 601 * 1 - dev has a superblock that is compatible and newer than refdev 602 * so dev should be used as the refdev in future 603 * -EINVAL superblock incompatible or invalid 604 * -othererror e.g. -EIO 605 * 606 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) 607 * Verify that dev is acceptable into mddev. 608 * The first time, mddev->raid_disks will be 0, and data from 609 * dev should be merged in. Subsequent calls check that dev 610 * is new enough. Return 0 or -EINVAL 611 * 612 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) 613 * Update the superblock for rdev with data in mddev 614 * This does not write to disc. 615 * 616 */ 617 618 struct super_type { 619 char *name; 620 struct module *owner; 621 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version); 622 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); 623 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); 624 }; 625 626 /* 627 * load_super for 0.90.0 628 */ 629 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 630 { 631 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 632 mdp_super_t *sb; 633 int ret; 634 sector_t sb_offset; 635 636 /* 637 * Calculate the position of the superblock, 638 * it's at the end of the disk. 639 * 640 * It also happens to be a multiple of 4Kb. 641 */ 642 sb_offset = calc_dev_sboffset(rdev->bdev); 643 rdev->sb_offset = sb_offset; 644 645 ret = read_disk_sb(rdev, MD_SB_BYTES); 646 if (ret) return ret; 647 648 ret = -EINVAL; 649 650 bdevname(rdev->bdev, b); 651 sb = (mdp_super_t*)page_address(rdev->sb_page); 652 653 if (sb->md_magic != MD_SB_MAGIC) { 654 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 655 b); 656 goto abort; 657 } 658 659 if (sb->major_version != 0 || 660 sb->minor_version != 90) { 661 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 662 sb->major_version, sb->minor_version, 663 b); 664 goto abort; 665 } 666 667 if (sb->raid_disks <= 0) 668 goto abort; 669 670 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 671 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 672 b); 673 goto abort; 674 } 675 676 rdev->preferred_minor = sb->md_minor; 677 rdev->data_offset = 0; 678 rdev->sb_size = MD_SB_BYTES; 679 680 if (sb->level == LEVEL_MULTIPATH) 681 rdev->desc_nr = -1; 682 else 683 rdev->desc_nr = sb->this_disk.number; 684 685 if (refdev == 0) 686 ret = 1; 687 else { 688 __u64 ev1, ev2; 689 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); 690 if (!uuid_equal(refsb, sb)) { 691 printk(KERN_WARNING "md: %s has different UUID to %s\n", 692 b, bdevname(refdev->bdev,b2)); 693 goto abort; 694 } 695 if (!sb_equal(refsb, sb)) { 696 printk(KERN_WARNING "md: %s has same UUID" 697 " but different superblock to %s\n", 698 b, bdevname(refdev->bdev, b2)); 699 goto abort; 700 } 701 ev1 = md_event(sb); 702 ev2 = md_event(refsb); 703 if (ev1 > ev2) 704 ret = 1; 705 else 706 ret = 0; 707 } 708 rdev->size = calc_dev_size(rdev, sb->chunk_size); 709 710 if (rdev->size < sb->size && sb->level > 1) 711 /* "this cannot possibly happen" ... 
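		 * (a member that cannot hold the component size recorded in
		 *  its own superblock must have a corrupt or stale superblock)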
*/ 712 ret = -EINVAL; 713 714 abort: 715 return ret; 716 } 717 718 /* 719 * validate_super for 0.90.0 720 */ 721 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) 722 { 723 mdp_disk_t *desc; 724 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 725 726 rdev->raid_disk = -1; 727 rdev->flags = 0; 728 if (mddev->raid_disks == 0) { 729 mddev->major_version = 0; 730 mddev->minor_version = sb->minor_version; 731 mddev->patch_version = sb->patch_version; 732 mddev->persistent = ! sb->not_persistent; 733 mddev->chunk_size = sb->chunk_size; 734 mddev->ctime = sb->ctime; 735 mddev->utime = sb->utime; 736 mddev->level = sb->level; 737 mddev->clevel[0] = 0; 738 mddev->layout = sb->layout; 739 mddev->raid_disks = sb->raid_disks; 740 mddev->size = sb->size; 741 mddev->events = md_event(sb); 742 mddev->bitmap_offset = 0; 743 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 744 745 if (sb->state & (1<<MD_SB_CLEAN)) 746 mddev->recovery_cp = MaxSector; 747 else { 748 if (sb->events_hi == sb->cp_events_hi && 749 sb->events_lo == sb->cp_events_lo) { 750 mddev->recovery_cp = sb->recovery_cp; 751 } else 752 mddev->recovery_cp = 0; 753 } 754 755 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 756 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 757 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 758 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 759 760 mddev->max_disks = MD_SB_DISKS; 761 762 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 763 mddev->bitmap_file == NULL) { 764 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 765 && mddev->level != 10) { 766 /* FIXME use a better test */ 767 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 768 return -EINVAL; 769 } 770 mddev->bitmap_offset = mddev->default_bitmap_offset; 771 } 772 773 } else if (mddev->pers == NULL) { 774 /* Insist on good event counter while assembling */ 775 __u64 ev1 = md_event(sb); 776 ++ev1; 777 if (ev1 < mddev->events) 778 return -EINVAL; 779 } else if (mddev->bitmap) { 780 /* if adding to array with a bitmap, then we can accept an 781 * older device ... but not too old. 782 */ 783 __u64 ev1 = md_event(sb); 784 if (ev1 < mddev->bitmap->events_cleared) 785 return 0; 786 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 787 return 0; 788 789 if (mddev->level != LEVEL_MULTIPATH) { 790 desc = sb->disks + rdev->desc_nr; 791 792 if (desc->state & (1<<MD_DISK_FAULTY)) 793 set_bit(Faulty, &rdev->flags); 794 else if (desc->state & (1<<MD_DISK_SYNC) && 795 desc->raid_disk < mddev->raid_disks) { 796 set_bit(In_sync, &rdev->flags); 797 rdev->raid_disk = desc->raid_disk; 798 } 799 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 800 set_bit(WriteMostly, &rdev->flags); 801 } else /* MULTIPATH are always insync */ 802 set_bit(In_sync, &rdev->flags); 803 return 0; 804 } 805 806 /* 807 * sync_super for 0.90.0 808 */ 809 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) 810 { 811 mdp_super_t *sb; 812 struct list_head *tmp; 813 mdk_rdev_t *rdev2; 814 int next_spare = mddev->raid_disks; 815 816 817 /* make rdev->sb match mddev data.. 818 * 819 * 1/ zero out disks 820 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 821 * 3/ any empty disks < next_spare become removed 822 * 823 * disks[0] gets initialised to REMOVED because 824 * we cannot be sure from other fields if it has 825 * been initialised or not. 
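	 *
	 * The active/working/failed/spare counts gathered below are also
	 * written back into the superblock totals.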
826 */ 827 int i; 828 int active=0, working=0,failed=0,spare=0,nr_disks=0; 829 830 rdev->sb_size = MD_SB_BYTES; 831 832 sb = (mdp_super_t*)page_address(rdev->sb_page); 833 834 memset(sb, 0, sizeof(*sb)); 835 836 sb->md_magic = MD_SB_MAGIC; 837 sb->major_version = mddev->major_version; 838 sb->minor_version = mddev->minor_version; 839 sb->patch_version = mddev->patch_version; 840 sb->gvalid_words = 0; /* ignored */ 841 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 842 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 843 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 844 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 845 846 sb->ctime = mddev->ctime; 847 sb->level = mddev->level; 848 sb->size = mddev->size; 849 sb->raid_disks = mddev->raid_disks; 850 sb->md_minor = mddev->md_minor; 851 sb->not_persistent = !mddev->persistent; 852 sb->utime = mddev->utime; 853 sb->state = 0; 854 sb->events_hi = (mddev->events>>32); 855 sb->events_lo = (u32)mddev->events; 856 857 if (mddev->in_sync) 858 { 859 sb->recovery_cp = mddev->recovery_cp; 860 sb->cp_events_hi = (mddev->events>>32); 861 sb->cp_events_lo = (u32)mddev->events; 862 if (mddev->recovery_cp == MaxSector) 863 sb->state = (1<< MD_SB_CLEAN); 864 } else 865 sb->recovery_cp = 0; 866 867 sb->layout = mddev->layout; 868 sb->chunk_size = mddev->chunk_size; 869 870 if (mddev->bitmap && mddev->bitmap_file == NULL) 871 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 872 873 sb->disks[0].state = (1<<MD_DISK_REMOVED); 874 ITERATE_RDEV(mddev,rdev2,tmp) { 875 mdp_disk_t *d; 876 int desc_nr; 877 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 878 && !test_bit(Faulty, &rdev2->flags)) 879 desc_nr = rdev2->raid_disk; 880 else 881 desc_nr = next_spare++; 882 rdev2->desc_nr = desc_nr; 883 d = &sb->disks[rdev2->desc_nr]; 884 nr_disks++; 885 d->number = rdev2->desc_nr; 886 d->major = MAJOR(rdev2->bdev->bd_dev); 887 d->minor = MINOR(rdev2->bdev->bd_dev); 888 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 889 && !test_bit(Faulty, &rdev2->flags)) 890 d->raid_disk = rdev2->raid_disk; 891 else 892 d->raid_disk = rdev2->desc_nr; /* compatibility */ 893 if (test_bit(Faulty, &rdev2->flags)) { 894 d->state = (1<<MD_DISK_FAULTY); 895 failed++; 896 } else if (test_bit(In_sync, &rdev2->flags)) { 897 d->state = (1<<MD_DISK_ACTIVE); 898 d->state |= (1<<MD_DISK_SYNC); 899 active++; 900 working++; 901 } else { 902 d->state = 0; 903 spare++; 904 working++; 905 } 906 if (test_bit(WriteMostly, &rdev2->flags)) 907 d->state |= (1<<MD_DISK_WRITEMOSTLY); 908 } 909 /* now set the "removed" and "faulty" bits on any missing devices */ 910 for (i=0 ; i < mddev->raid_disks ; i++) { 911 mdp_disk_t *d = &sb->disks[i]; 912 if (d->state == 0 && d->number == 0) { 913 d->number = i; 914 d->raid_disk = i; 915 d->state = (1<<MD_DISK_REMOVED); 916 d->state |= (1<<MD_DISK_FAULTY); 917 failed++; 918 } 919 } 920 sb->nr_disks = nr_disks; 921 sb->active_disks = active; 922 sb->working_disks = working; 923 sb->failed_disks = failed; 924 sb->spare_disks = spare; 925 926 sb->this_disk = sb->disks[rdev->desc_nr]; 927 sb->sb_csum = calc_sb_csum(sb); 928 } 929 930 /* 931 * version 1 superblock 932 */ 933 934 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb) 935 { 936 unsigned int disk_csum, csum; 937 unsigned long long newcsum; 938 int size = 256 + le32_to_cpu(sb->max_dev)*2; 939 unsigned int *isuper = (unsigned int*)sb; 940 int i; 941 942 disk_csum = sb->sb_csum; 943 sb->sb_csum = 0; 944 newcsum = 0; 945 for (i=0; size>=4; size -= 4 ) 946 newcsum += le32_to_cpu(*isuper++); 947 948 if (size == 2) 
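		/* 256 + 2*max_dev is always even, so at most a single
		 * 16-bit word can remain after the 32-bit loop above */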
949 newcsum += le16_to_cpu(*(unsigned short*) isuper); 950 951 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 952 sb->sb_csum = disk_csum; 953 return cpu_to_le32(csum); 954 } 955 956 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 957 { 958 struct mdp_superblock_1 *sb; 959 int ret; 960 sector_t sb_offset; 961 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 962 int bmask; 963 964 /* 965 * Calculate the position of the superblock. 966 * It is always aligned to a 4K boundary and 967 * depeding on minor_version, it can be: 968 * 0: At least 8K, but less than 12K, from end of device 969 * 1: At start of device 970 * 2: 4K from start of device. 971 */ 972 switch(minor_version) { 973 case 0: 974 sb_offset = rdev->bdev->bd_inode->i_size >> 9; 975 sb_offset -= 8*2; 976 sb_offset &= ~(sector_t)(4*2-1); 977 /* convert from sectors to K */ 978 sb_offset /= 2; 979 break; 980 case 1: 981 sb_offset = 0; 982 break; 983 case 2: 984 sb_offset = 4; 985 break; 986 default: 987 return -EINVAL; 988 } 989 rdev->sb_offset = sb_offset; 990 991 /* superblock is rarely larger than 1K, but it can be larger, 992 * and it is safe to read 4k, so we do that 993 */ 994 ret = read_disk_sb(rdev, 4096); 995 if (ret) return ret; 996 997 998 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 999 1000 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1001 sb->major_version != cpu_to_le32(1) || 1002 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1003 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) || 1004 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1005 return -EINVAL; 1006 1007 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1008 printk("md: invalid superblock checksum on %s\n", 1009 bdevname(rdev->bdev,b)); 1010 return -EINVAL; 1011 } 1012 if (le64_to_cpu(sb->data_size) < 10) { 1013 printk("md: data_size too small on %s\n", 1014 bdevname(rdev->bdev,b)); 1015 return -EINVAL; 1016 } 1017 rdev->preferred_minor = 0xffff; 1018 rdev->data_offset = le64_to_cpu(sb->data_offset); 1019 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1020 1021 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1022 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1023 if (rdev->sb_size & bmask) 1024 rdev-> sb_size = (rdev->sb_size | bmask)+1; 1025 1026 if (refdev == 0) 1027 return 1; 1028 else { 1029 __u64 ev1, ev2; 1030 struct mdp_superblock_1 *refsb = 1031 (struct mdp_superblock_1*)page_address(refdev->sb_page); 1032 1033 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1034 sb->level != refsb->level || 1035 sb->layout != refsb->layout || 1036 sb->chunksize != refsb->chunksize) { 1037 printk(KERN_WARNING "md: %s has strangely different" 1038 " superblock to %s\n", 1039 bdevname(rdev->bdev,b), 1040 bdevname(refdev->bdev,b2)); 1041 return -EINVAL; 1042 } 1043 ev1 = le64_to_cpu(sb->events); 1044 ev2 = le64_to_cpu(refsb->events); 1045 1046 if (ev1 > ev2) 1047 return 1; 1048 } 1049 if (minor_version) 1050 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; 1051 else 1052 rdev->size = rdev->sb_offset; 1053 if (rdev->size < le64_to_cpu(sb->data_size)/2) 1054 return -EINVAL; 1055 rdev->size = le64_to_cpu(sb->data_size)/2; 1056 if (le32_to_cpu(sb->chunksize)) 1057 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); 1058 1059 if (le32_to_cpu(sb->size) > rdev->size*2) 1060 return -EINVAL; 1061 return 0; 1062 } 1063 1064 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1065 { 1066 struct mdp_superblock_1 *sb = (struct 
mdp_superblock_1*)page_address(rdev->sb_page); 1067 1068 rdev->raid_disk = -1; 1069 rdev->flags = 0; 1070 if (mddev->raid_disks == 0) { 1071 mddev->major_version = 1; 1072 mddev->patch_version = 0; 1073 mddev->persistent = 1; 1074 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; 1075 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1076 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1077 mddev->level = le32_to_cpu(sb->level); 1078 mddev->clevel[0] = 0; 1079 mddev->layout = le32_to_cpu(sb->layout); 1080 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1081 mddev->size = le64_to_cpu(sb->size)/2; 1082 mddev->events = le64_to_cpu(sb->events); 1083 mddev->bitmap_offset = 0; 1084 mddev->default_bitmap_offset = 1024; 1085 1086 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1087 memcpy(mddev->uuid, sb->set_uuid, 16); 1088 1089 mddev->max_disks = (4096-256)/2; 1090 1091 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1092 mddev->bitmap_file == NULL ) { 1093 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 1094 && mddev->level != 10) { 1095 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 1096 return -EINVAL; 1097 } 1098 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1099 } 1100 } else if (mddev->pers == NULL) { 1101 /* Insist of good event counter while assembling */ 1102 __u64 ev1 = le64_to_cpu(sb->events); 1103 ++ev1; 1104 if (ev1 < mddev->events) 1105 return -EINVAL; 1106 } else if (mddev->bitmap) { 1107 /* If adding to array with a bitmap, then we can accept an 1108 * older device, but not too old. 1109 */ 1110 __u64 ev1 = le64_to_cpu(sb->events); 1111 if (ev1 < mddev->bitmap->events_cleared) 1112 return 0; 1113 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 1114 return 0; 1115 1116 if (mddev->level != LEVEL_MULTIPATH) { 1117 int role; 1118 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1119 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1120 switch(role) { 1121 case 0xffff: /* spare */ 1122 break; 1123 case 0xfffe: /* faulty */ 1124 set_bit(Faulty, &rdev->flags); 1125 break; 1126 default: 1127 set_bit(In_sync, &rdev->flags); 1128 rdev->raid_disk = role; 1129 break; 1130 } 1131 if (sb->devflags & WriteMostly1) 1132 set_bit(WriteMostly, &rdev->flags); 1133 } else /* MULTIPATH are always insync */ 1134 set_bit(In_sync, &rdev->flags); 1135 1136 return 0; 1137 } 1138 1139 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) 1140 { 1141 struct mdp_superblock_1 *sb; 1142 struct list_head *tmp; 1143 mdk_rdev_t *rdev2; 1144 int max_dev, i; 1145 /* make rdev->sb match mddev and rdev data. 
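	 * Only the fields that change at run time are refreshed here
	 * (utime, events, resync_offset, the bitmap feature bits, the
	 * corrected-read count and the dev_roles[] table); user-space is
	 * expected to have written the static part of the v1 superblock.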
	 */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	max_dev = 0;
	ITERATE_RDEV(mddev,rdev2,tmp)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	ITERATE_RDEV(mddev,rdev2,tmp) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
	sb->sb_csum = calc_sb_1_csum(sb);
}


static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
			return rdev;

	return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev))
			return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	mdk_rdev_t *same_pdev;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct kobject *ko;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers)
			/* Cannot change size, so fail */
			return -ENOSPC;
		else
			mddev->size = rdev->size;
	}
	same_pdev = match_dev_unit(mddev, rdev);
	if (same_pdev)
		printk(KERN_WARNING
			"%s: WARNING: %s appears to be on the same physical"
			" disk as %s.\nTrue protection against single-disk"
			" failure might be compromised.\n",
			mdname(mddev), bdevname(rdev->bdev,b),
			bdevname(same_pdev->bdev,b2));

	/* Verify rdev->desc_nr is unique.
1264 * If it is -1, assign a free number, else 1265 * check number is not in use 1266 */ 1267 if (rdev->desc_nr < 0) { 1268 int choice = 0; 1269 if (mddev->pers) choice = mddev->raid_disks; 1270 while (find_rdev_nr(mddev, choice)) 1271 choice++; 1272 rdev->desc_nr = choice; 1273 } else { 1274 if (find_rdev_nr(mddev, rdev->desc_nr)) 1275 return -EBUSY; 1276 } 1277 bdevname(rdev->bdev,b); 1278 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0) 1279 return -ENOMEM; 1280 1281 list_add(&rdev->same_set, &mddev->disks); 1282 rdev->mddev = mddev; 1283 printk(KERN_INFO "md: bind<%s>\n", b); 1284 1285 rdev->kobj.parent = &mddev->kobj; 1286 kobject_add(&rdev->kobj); 1287 1288 if (rdev->bdev->bd_part) 1289 ko = &rdev->bdev->bd_part->kobj; 1290 else 1291 ko = &rdev->bdev->bd_disk->kobj; 1292 sysfs_create_link(&rdev->kobj, ko, "block"); 1293 return 0; 1294 } 1295 1296 static void unbind_rdev_from_array(mdk_rdev_t * rdev) 1297 { 1298 char b[BDEVNAME_SIZE]; 1299 if (!rdev->mddev) { 1300 MD_BUG(); 1301 return; 1302 } 1303 list_del_init(&rdev->same_set); 1304 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1305 rdev->mddev = NULL; 1306 sysfs_remove_link(&rdev->kobj, "block"); 1307 kobject_del(&rdev->kobj); 1308 } 1309 1310 /* 1311 * prevent the device from being mounted, repartitioned or 1312 * otherwise reused by a RAID array (or any other kernel 1313 * subsystem), by bd_claiming the device. 1314 */ 1315 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) 1316 { 1317 int err = 0; 1318 struct block_device *bdev; 1319 char b[BDEVNAME_SIZE]; 1320 1321 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); 1322 if (IS_ERR(bdev)) { 1323 printk(KERN_ERR "md: could not open %s.\n", 1324 __bdevname(dev, b)); 1325 return PTR_ERR(bdev); 1326 } 1327 err = bd_claim(bdev, rdev); 1328 if (err) { 1329 printk(KERN_ERR "md: could not bd_claim %s.\n", 1330 bdevname(bdev, b)); 1331 blkdev_put(bdev); 1332 return err; 1333 } 1334 rdev->bdev = bdev; 1335 return err; 1336 } 1337 1338 static void unlock_rdev(mdk_rdev_t *rdev) 1339 { 1340 struct block_device *bdev = rdev->bdev; 1341 rdev->bdev = NULL; 1342 if (!bdev) 1343 MD_BUG(); 1344 bd_release(bdev); 1345 blkdev_put(bdev); 1346 } 1347 1348 void md_autodetect_dev(dev_t dev); 1349 1350 static void export_rdev(mdk_rdev_t * rdev) 1351 { 1352 char b[BDEVNAME_SIZE]; 1353 printk(KERN_INFO "md: export_rdev(%s)\n", 1354 bdevname(rdev->bdev,b)); 1355 if (rdev->mddev) 1356 MD_BUG(); 1357 free_disk_sb(rdev); 1358 list_del_init(&rdev->same_set); 1359 #ifndef MODULE 1360 md_autodetect_dev(rdev->bdev->bd_dev); 1361 #endif 1362 unlock_rdev(rdev); 1363 kobject_put(&rdev->kobj); 1364 } 1365 1366 static void kick_rdev_from_array(mdk_rdev_t * rdev) 1367 { 1368 unbind_rdev_from_array(rdev); 1369 export_rdev(rdev); 1370 } 1371 1372 static void export_array(mddev_t *mddev) 1373 { 1374 struct list_head *tmp; 1375 mdk_rdev_t *rdev; 1376 1377 ITERATE_RDEV(mddev,rdev,tmp) { 1378 if (!rdev->mddev) { 1379 MD_BUG(); 1380 continue; 1381 } 1382 kick_rdev_from_array(rdev); 1383 } 1384 if (!list_empty(&mddev->disks)) 1385 MD_BUG(); 1386 mddev->raid_disks = 0; 1387 mddev->major_version = 0; 1388 } 1389 1390 static void print_desc(mdp_disk_t *desc) 1391 { 1392 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 1393 desc->major,desc->minor,desc->raid_disk,desc->state); 1394 } 1395 1396 static void print_sb(mdp_super_t *sb) 1397 { 1398 int i; 1399 1400 printk(KERN_INFO 1401 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 1402 sb->major_version, sb->minor_version, sb->patch_version, 1403 
sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 1404 sb->ctime); 1405 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 1406 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 1407 sb->md_minor, sb->layout, sb->chunk_size); 1408 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 1409 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 1410 sb->utime, sb->state, sb->active_disks, sb->working_disks, 1411 sb->failed_disks, sb->spare_disks, 1412 sb->sb_csum, (unsigned long)sb->events_lo); 1413 1414 printk(KERN_INFO); 1415 for (i = 0; i < MD_SB_DISKS; i++) { 1416 mdp_disk_t *desc; 1417 1418 desc = sb->disks + i; 1419 if (desc->number || desc->major || desc->minor || 1420 desc->raid_disk || (desc->state && (desc->state != 4))) { 1421 printk(" D %2d: ", i); 1422 print_desc(desc); 1423 } 1424 } 1425 printk(KERN_INFO "md: THIS: "); 1426 print_desc(&sb->this_disk); 1427 1428 } 1429 1430 static void print_rdev(mdk_rdev_t *rdev) 1431 { 1432 char b[BDEVNAME_SIZE]; 1433 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", 1434 bdevname(rdev->bdev,b), (unsigned long long)rdev->size, 1435 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 1436 rdev->desc_nr); 1437 if (rdev->sb_loaded) { 1438 printk(KERN_INFO "md: rdev superblock:\n"); 1439 print_sb((mdp_super_t*)page_address(rdev->sb_page)); 1440 } else 1441 printk(KERN_INFO "md: no rdev superblock!\n"); 1442 } 1443 1444 void md_print_devices(void) 1445 { 1446 struct list_head *tmp, *tmp2; 1447 mdk_rdev_t *rdev; 1448 mddev_t *mddev; 1449 char b[BDEVNAME_SIZE]; 1450 1451 printk("\n"); 1452 printk("md: **********************************\n"); 1453 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); 1454 printk("md: **********************************\n"); 1455 ITERATE_MDDEV(mddev,tmp) { 1456 1457 if (mddev->bitmap) 1458 bitmap_print_sb(mddev->bitmap); 1459 else 1460 printk("%s: ", mdname(mddev)); 1461 ITERATE_RDEV(mddev,rdev,tmp2) 1462 printk("<%s>", bdevname(rdev->bdev,b)); 1463 printk("\n"); 1464 1465 ITERATE_RDEV(mddev,rdev,tmp2) 1466 print_rdev(rdev); 1467 } 1468 printk("md: **********************************\n"); 1469 printk("\n"); 1470 } 1471 1472 1473 static void sync_sbs(mddev_t * mddev) 1474 { 1475 mdk_rdev_t *rdev; 1476 struct list_head *tmp; 1477 1478 ITERATE_RDEV(mddev,rdev,tmp) { 1479 super_types[mddev->major_version]. 1480 sync_super(mddev, rdev); 1481 rdev->sb_loaded = 1; 1482 } 1483 } 1484 1485 static void md_update_sb(mddev_t * mddev) 1486 { 1487 int err; 1488 struct list_head *tmp; 1489 mdk_rdev_t *rdev; 1490 int sync_req; 1491 1492 repeat: 1493 spin_lock_irq(&mddev->write_lock); 1494 sync_req = mddev->in_sync; 1495 mddev->utime = get_seconds(); 1496 mddev->events ++; 1497 1498 if (!mddev->events) { 1499 /* 1500 * oops, this 64-bit counter should never wrap. 
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	mddev->sb_dirty = 2;
	sync_sbs(mddev);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		mddev->sb_dirty = 0;
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, sb_dirty was set to 1, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	mddev->sb_dirty = 0;
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form. For this we use cmd_match().
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.
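	 * Illustrative: a write of "check\n" matches "check", while
	 * "checkpoint" does not.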
They must either be the same, or cmd can 1570 * have a trailing newline 1571 */ 1572 while (*cmd && *str && *cmd == *str) { 1573 cmd++; 1574 str++; 1575 } 1576 if (*cmd == '\n') 1577 cmd++; 1578 if (*str || *cmd) 1579 return 0; 1580 return 1; 1581 } 1582 1583 struct rdev_sysfs_entry { 1584 struct attribute attr; 1585 ssize_t (*show)(mdk_rdev_t *, char *); 1586 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 1587 }; 1588 1589 static ssize_t 1590 state_show(mdk_rdev_t *rdev, char *page) 1591 { 1592 char *sep = ""; 1593 int len=0; 1594 1595 if (test_bit(Faulty, &rdev->flags)) { 1596 len+= sprintf(page+len, "%sfaulty",sep); 1597 sep = ","; 1598 } 1599 if (test_bit(In_sync, &rdev->flags)) { 1600 len += sprintf(page+len, "%sin_sync",sep); 1601 sep = ","; 1602 } 1603 if (!test_bit(Faulty, &rdev->flags) && 1604 !test_bit(In_sync, &rdev->flags)) { 1605 len += sprintf(page+len, "%sspare", sep); 1606 sep = ","; 1607 } 1608 return len+sprintf(page+len, "\n"); 1609 } 1610 1611 static struct rdev_sysfs_entry 1612 rdev_state = __ATTR_RO(state); 1613 1614 static ssize_t 1615 super_show(mdk_rdev_t *rdev, char *page) 1616 { 1617 if (rdev->sb_loaded && rdev->sb_size) { 1618 memcpy(page, page_address(rdev->sb_page), rdev->sb_size); 1619 return rdev->sb_size; 1620 } else 1621 return 0; 1622 } 1623 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); 1624 1625 static ssize_t 1626 errors_show(mdk_rdev_t *rdev, char *page) 1627 { 1628 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 1629 } 1630 1631 static ssize_t 1632 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1633 { 1634 char *e; 1635 unsigned long n = simple_strtoul(buf, &e, 10); 1636 if (*buf && (*e == 0 || *e == '\n')) { 1637 atomic_set(&rdev->corrected_errors, n); 1638 return len; 1639 } 1640 return -EINVAL; 1641 } 1642 static struct rdev_sysfs_entry rdev_errors = 1643 __ATTR(errors, 0644, errors_show, errors_store); 1644 1645 static ssize_t 1646 slot_show(mdk_rdev_t *rdev, char *page) 1647 { 1648 if (rdev->raid_disk < 0) 1649 return sprintf(page, "none\n"); 1650 else 1651 return sprintf(page, "%d\n", rdev->raid_disk); 1652 } 1653 1654 static ssize_t 1655 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1656 { 1657 char *e; 1658 int slot = simple_strtoul(buf, &e, 10); 1659 if (strncmp(buf, "none", 4)==0) 1660 slot = -1; 1661 else if (e==buf || (*e && *e!= '\n')) 1662 return -EINVAL; 1663 if (rdev->mddev->pers) 1664 /* Cannot set slot in active array (yet) */ 1665 return -EBUSY; 1666 if (slot >= rdev->mddev->raid_disks) 1667 return -ENOSPC; 1668 rdev->raid_disk = slot; 1669 /* assume it is working */ 1670 rdev->flags = 0; 1671 set_bit(In_sync, &rdev->flags); 1672 return len; 1673 } 1674 1675 1676 static struct rdev_sysfs_entry rdev_slot = 1677 __ATTR(slot, 0644, slot_show, slot_store); 1678 1679 static ssize_t 1680 offset_show(mdk_rdev_t *rdev, char *page) 1681 { 1682 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 1683 } 1684 1685 static ssize_t 1686 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1687 { 1688 char *e; 1689 unsigned long long offset = simple_strtoull(buf, &e, 10); 1690 if (e==buf || (*e && *e != '\n')) 1691 return -EINVAL; 1692 if (rdev->mddev->pers) 1693 return -EBUSY; 1694 rdev->data_offset = offset; 1695 return len; 1696 } 1697 1698 static struct rdev_sysfs_entry rdev_offset = 1699 __ATTR(offset, 0644, offset_show, offset_store); 1700 1701 static ssize_t 1702 rdev_size_show(mdk_rdev_t *rdev, char *page) 1703 { 1704 return sprintf(page, "%llu\n", 
(unsigned long long)rdev->size); 1705 } 1706 1707 static ssize_t 1708 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1709 { 1710 char *e; 1711 unsigned long long size = simple_strtoull(buf, &e, 10); 1712 if (e==buf || (*e && *e != '\n')) 1713 return -EINVAL; 1714 if (rdev->mddev->pers) 1715 return -EBUSY; 1716 rdev->size = size; 1717 if (size < rdev->mddev->size || rdev->mddev->size == 0) 1718 rdev->mddev->size = size; 1719 return len; 1720 } 1721 1722 static struct rdev_sysfs_entry rdev_size = 1723 __ATTR(size, 0644, rdev_size_show, rdev_size_store); 1724 1725 static struct attribute *rdev_default_attrs[] = { 1726 &rdev_state.attr, 1727 &rdev_super.attr, 1728 &rdev_errors.attr, 1729 &rdev_slot.attr, 1730 &rdev_offset.attr, 1731 &rdev_size.attr, 1732 NULL, 1733 }; 1734 static ssize_t 1735 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 1736 { 1737 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1738 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1739 1740 if (!entry->show) 1741 return -EIO; 1742 return entry->show(rdev, page); 1743 } 1744 1745 static ssize_t 1746 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 1747 const char *page, size_t length) 1748 { 1749 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1750 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1751 1752 if (!entry->store) 1753 return -EIO; 1754 return entry->store(rdev, page, length); 1755 } 1756 1757 static void rdev_free(struct kobject *ko) 1758 { 1759 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 1760 kfree(rdev); 1761 } 1762 static struct sysfs_ops rdev_sysfs_ops = { 1763 .show = rdev_attr_show, 1764 .store = rdev_attr_store, 1765 }; 1766 static struct kobj_type rdev_ktype = { 1767 .release = rdev_free, 1768 .sysfs_ops = &rdev_sysfs_ops, 1769 .default_attrs = rdev_default_attrs, 1770 }; 1771 1772 /* 1773 * Import a device. If 'super_format' >= 0, then sanity check the superblock 1774 * 1775 * mark the device faulty if: 1776 * 1777 * - the device is nonexistent (zero size) 1778 * - the device has no valid superblock 1779 * 1780 * a faulty rdev _never_ has rdev->sb set. 1781 */ 1782 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 1783 { 1784 char b[BDEVNAME_SIZE]; 1785 int err; 1786 mdk_rdev_t *rdev; 1787 sector_t size; 1788 1789 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 1790 if (!rdev) { 1791 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 1792 return ERR_PTR(-ENOMEM); 1793 } 1794 1795 if ((err = alloc_disk_sb(rdev))) 1796 goto abort_free; 1797 1798 err = lock_rdev(rdev, newdev); 1799 if (err) 1800 goto abort_free; 1801 1802 rdev->kobj.parent = NULL; 1803 rdev->kobj.ktype = &rdev_ktype; 1804 kobject_init(&rdev->kobj); 1805 1806 rdev->desc_nr = -1; 1807 rdev->flags = 0; 1808 rdev->data_offset = 0; 1809 atomic_set(&rdev->nr_pending, 0); 1810 atomic_set(&rdev->read_errors, 0); 1811 atomic_set(&rdev->corrected_errors, 0); 1812 1813 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 1814 if (!size) { 1815 printk(KERN_WARNING 1816 "md: %s has zero or unknown size, marking faulty!\n", 1817 bdevname(rdev->bdev,b)); 1818 err = -EINVAL; 1819 goto abort_free; 1820 } 1821 1822 if (super_format >= 0) { 1823 err = super_types[super_format]. 
1824 load_super(rdev, NULL, super_minor); 1825 if (err == -EINVAL) { 1826 printk(KERN_WARNING 1827 "md: %s has invalid sb, not importing!\n", 1828 bdevname(rdev->bdev,b)); 1829 goto abort_free; 1830 } 1831 if (err < 0) { 1832 printk(KERN_WARNING 1833 "md: could not read %s's sb, not importing!\n", 1834 bdevname(rdev->bdev,b)); 1835 goto abort_free; 1836 } 1837 } 1838 INIT_LIST_HEAD(&rdev->same_set); 1839 1840 return rdev; 1841 1842 abort_free: 1843 if (rdev->sb_page) { 1844 if (rdev->bdev) 1845 unlock_rdev(rdev); 1846 free_disk_sb(rdev); 1847 } 1848 kfree(rdev); 1849 return ERR_PTR(err); 1850 } 1851 1852 /* 1853 * Check a full RAID array for plausibility 1854 */ 1855 1856 1857 static void analyze_sbs(mddev_t * mddev) 1858 { 1859 int i; 1860 struct list_head *tmp; 1861 mdk_rdev_t *rdev, *freshest; 1862 char b[BDEVNAME_SIZE]; 1863 1864 freshest = NULL; 1865 ITERATE_RDEV(mddev,rdev,tmp) 1866 switch (super_types[mddev->major_version]. 1867 load_super(rdev, freshest, mddev->minor_version)) { 1868 case 1: 1869 freshest = rdev; 1870 break; 1871 case 0: 1872 break; 1873 default: 1874 printk( KERN_ERR \ 1875 "md: fatal superblock inconsistency in %s" 1876 " -- removing from array\n", 1877 bdevname(rdev->bdev,b)); 1878 kick_rdev_from_array(rdev); 1879 } 1880 1881 1882 super_types[mddev->major_version]. 1883 validate_super(mddev, freshest); 1884 1885 i = 0; 1886 ITERATE_RDEV(mddev,rdev,tmp) { 1887 if (rdev != freshest) 1888 if (super_types[mddev->major_version]. 1889 validate_super(mddev, rdev)) { 1890 printk(KERN_WARNING "md: kicking non-fresh %s" 1891 " from array!\n", 1892 bdevname(rdev->bdev,b)); 1893 kick_rdev_from_array(rdev); 1894 continue; 1895 } 1896 if (mddev->level == LEVEL_MULTIPATH) { 1897 rdev->desc_nr = i++; 1898 rdev->raid_disk = rdev->desc_nr; 1899 set_bit(In_sync, &rdev->flags); 1900 } 1901 } 1902 1903 1904 1905 if (mddev->recovery_cp != MaxSector && 1906 mddev->level >= 1) 1907 printk(KERN_ERR "md: %s: raid array is not clean" 1908 " -- starting background reconstruction\n", 1909 mdname(mddev)); 1910 1911 } 1912 1913 static ssize_t 1914 level_show(mddev_t *mddev, char *page) 1915 { 1916 struct mdk_personality *p = mddev->pers; 1917 if (p) 1918 return sprintf(page, "%s\n", p->name); 1919 else if (mddev->clevel[0]) 1920 return sprintf(page, "%s\n", mddev->clevel); 1921 else if (mddev->level != LEVEL_NONE) 1922 return sprintf(page, "%d\n", mddev->level); 1923 else 1924 return 0; 1925 } 1926 1927 static ssize_t 1928 level_store(mddev_t *mddev, const char *buf, size_t len) 1929 { 1930 int rv = len; 1931 if (mddev->pers) 1932 return -EBUSY; 1933 if (len == 0) 1934 return 0; 1935 if (len >= sizeof(mddev->clevel)) 1936 return -ENOSPC; 1937 strncpy(mddev->clevel, buf, len); 1938 if (mddev->clevel[len-1] == '\n') 1939 len--; 1940 mddev->clevel[len] = 0; 1941 mddev->level = LEVEL_NONE; 1942 return rv; 1943 } 1944 1945 static struct md_sysfs_entry md_level = 1946 __ATTR(level, 0644, level_show, level_store); 1947 1948 static ssize_t 1949 raid_disks_show(mddev_t *mddev, char *page) 1950 { 1951 if (mddev->raid_disks == 0) 1952 return 0; 1953 return sprintf(page, "%d\n", mddev->raid_disks); 1954 } 1955 1956 static int update_raid_disks(mddev_t *mddev, int raid_disks); 1957 1958 static ssize_t 1959 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 1960 { 1961 /* can only set raid_disks if array is not yet active */ 1962 char *e; 1963 int rv = 0; 1964 unsigned long n = simple_strtoul(buf, &e, 10); 1965 1966 if (!*buf || (*e && *e != '\n')) 1967 return -EINVAL; 1968 1969 if (mddev->pers) 
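		/* array is already running: attempt an on-line change,
		 * which may fail with an error from update_raid_disks() */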
1970 rv = update_raid_disks(mddev, n); 1971 else 1972 mddev->raid_disks = n; 1973 return rv ? rv : len; 1974 } 1975 static struct md_sysfs_entry md_raid_disks = 1976 __ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store); 1977 1978 static ssize_t 1979 chunk_size_show(mddev_t *mddev, char *page) 1980 { 1981 return sprintf(page, "%d\n", mddev->chunk_size); 1982 } 1983 1984 static ssize_t 1985 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 1986 { 1987 /* can only set chunk_size if array is not yet active */ 1988 char *e; 1989 unsigned long n = simple_strtoul(buf, &e, 10); 1990 1991 if (mddev->pers) 1992 return -EBUSY; 1993 if (!*buf || (*e && *e != '\n')) 1994 return -EINVAL; 1995 1996 mddev->chunk_size = n; 1997 return len; 1998 } 1999 static struct md_sysfs_entry md_chunk_size = 2000 __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); 2001 2002 static ssize_t 2003 null_show(mddev_t *mddev, char *page) 2004 { 2005 return -EINVAL; 2006 } 2007 2008 static ssize_t 2009 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 2010 { 2011 /* buf must be %d:%d\n? giving major and minor numbers */ 2012 /* The new device is added to the array. 2013 * If the array has a persistent superblock, we read the 2014 * superblock to initialise info and check validity. 2015 * Otherwise, only checking done is that in bind_rdev_to_array, 2016 * which mainly checks size. 2017 */ 2018 char *e; 2019 int major = simple_strtoul(buf, &e, 10); 2020 int minor; 2021 dev_t dev; 2022 mdk_rdev_t *rdev; 2023 int err; 2024 2025 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 2026 return -EINVAL; 2027 minor = simple_strtoul(e+1, &e, 10); 2028 if (*e && *e != '\n') 2029 return -EINVAL; 2030 dev = MKDEV(major, minor); 2031 if (major != MAJOR(dev) || 2032 minor != MINOR(dev)) 2033 return -EOVERFLOW; 2034 2035 2036 if (mddev->persistent) { 2037 rdev = md_import_device(dev, mddev->major_version, 2038 mddev->minor_version); 2039 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 2040 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 2041 mdk_rdev_t, same_set); 2042 err = super_types[mddev->major_version] 2043 .load_super(rdev, rdev0, mddev->minor_version); 2044 if (err < 0) 2045 goto out; 2046 } 2047 } else 2048 rdev = md_import_device(dev, -1, -1); 2049 2050 if (IS_ERR(rdev)) 2051 return PTR_ERR(rdev); 2052 err = bind_rdev_to_array(rdev, mddev); 2053 out: 2054 if (err) 2055 export_rdev(rdev); 2056 return err ? err : len; 2057 } 2058 2059 static struct md_sysfs_entry md_new_device = 2060 __ATTR(new_dev, 0200, null_show, new_dev_store); 2061 2062 static ssize_t 2063 size_show(mddev_t *mddev, char *page) 2064 { 2065 return sprintf(page, "%llu\n", (unsigned long long)mddev->size); 2066 } 2067 2068 static int update_size(mddev_t *mddev, unsigned long size); 2069 2070 static ssize_t 2071 size_store(mddev_t *mddev, const char *buf, size_t len) 2072 { 2073 /* If array is inactive, we can reduce the component size, but 2074 * not increase it (except from 0). 2075 * If array is active, we can try an on-line resize 2076 */ 2077 char *e; 2078 int err = 0; 2079 unsigned long long size = simple_strtoull(buf, &e, 10); 2080 if (!*buf || *buf == '\n' || 2081 (*e && *e != '\n')) 2082 return -EINVAL; 2083 2084 if (mddev->pers) { 2085 err = update_size(mddev, size); 2086 md_update_sb(mddev); 2087 } else { 2088 if (mddev->size == 0 || 2089 mddev->size > size) 2090 mddev->size = size; 2091 else 2092 err = -ENOSPC; 2093 } 2094 return err ? 
err : len;
2095 }
2096
2097 static struct md_sysfs_entry md_size =
2098 __ATTR(component_size, 0644, size_show, size_store);
2099
2100
2101 /* Metadata version.
2102 * This is either 'none' for arrays with externally managed metadata,
2103 * or N.M for internally known formats
2104 */
2105 static ssize_t
2106 metadata_show(mddev_t *mddev, char *page)
2107 {
2108 if (mddev->persistent)
2109 return sprintf(page, "%d.%d\n",
2110 mddev->major_version, mddev->minor_version);
2111 else
2112 return sprintf(page, "none\n");
2113 }
2114
2115 static ssize_t
2116 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2117 {
2118 int major, minor;
2119 char *e;
2120 if (!list_empty(&mddev->disks))
2121 return -EBUSY;
2122
2123 if (cmd_match(buf, "none")) {
2124 mddev->persistent = 0;
2125 mddev->major_version = 0;
2126 mddev->minor_version = 90;
2127 return len;
2128 }
2129 major = simple_strtoul(buf, &e, 10);
2130 if (e==buf || *e != '.')
2131 return -EINVAL;
2132 buf = e+1;
2133 minor = simple_strtoul(buf, &e, 10);
2134 if (e==buf || *e != '\n')
2135 return -EINVAL;
2136 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2137 super_types[major].name == NULL)
2138 return -ENOENT;
2139 mddev->major_version = major;
2140 mddev->minor_version = minor;
2141 mddev->persistent = 1;
2142 return len;
2143 }
2144
2145 static struct md_sysfs_entry md_metadata =
2146 __ATTR(metadata_version, 0644, metadata_show, metadata_store);
2147
2148 static ssize_t
2149 action_show(mddev_t *mddev, char *page)
2150 {
2151 char *type = "idle";
2152 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2153 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2154 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2155 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2156 type = "resync";
2157 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2158 type = "check";
2159 else
2160 type = "repair";
2161 } else
2162 type = "recover";
2163 }
2164 return sprintf(page, "%s\n", type);
2165 }
2166
2167 static ssize_t
2168 action_store(mddev_t *mddev, const char *page, size_t len)
2169 {
2170 if (!mddev->pers || !mddev->pers->sync_request)
2171 return -EINVAL;
2172
2173 if (cmd_match(page, "idle")) {
2174 if (mddev->sync_thread) {
2175 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2176 md_unregister_thread(mddev->sync_thread);
2177 mddev->sync_thread = NULL;
2178 mddev->recovery = 0;
2179 }
2180 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2181 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2182 return -EBUSY;
2183 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2184 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2185 else {
2186 if (cmd_match(page, "check"))
2187 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2188 else if (!cmd_match(page, "repair"))
2189 return -EINVAL;
2190 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2191 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2192 }
2193 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2194 md_wakeup_thread(mddev->thread);
2195 return len;
2196 }
2197
2198 static ssize_t
2199 mismatch_cnt_show(mddev_t *mddev, char *page)
2200 {
2201 return sprintf(page, "%llu\n",
2202 (unsigned long long) mddev->resync_mismatches);
2203 }
2204
2205 static struct md_sysfs_entry
2206 md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2207
2208
2209 static struct md_sysfs_entry
2210 md_mismatches = __ATTR_RO(mismatch_cnt);
2211
2212 static ssize_t
2213 sync_min_show(mddev_t *mddev, char *page)
2214 {
2215 return
sprintf(page, "%d (%s)\n", speed_min(mddev), 2216 mddev->sync_speed_min ? "local": "system"); 2217 } 2218 2219 static ssize_t 2220 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 2221 { 2222 int min; 2223 char *e; 2224 if (strncmp(buf, "system", 6)==0) { 2225 mddev->sync_speed_min = 0; 2226 return len; 2227 } 2228 min = simple_strtoul(buf, &e, 10); 2229 if (buf == e || (*e && *e != '\n') || min <= 0) 2230 return -EINVAL; 2231 mddev->sync_speed_min = min; 2232 return len; 2233 } 2234 2235 static struct md_sysfs_entry md_sync_min = 2236 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 2237 2238 static ssize_t 2239 sync_max_show(mddev_t *mddev, char *page) 2240 { 2241 return sprintf(page, "%d (%s)\n", speed_max(mddev), 2242 mddev->sync_speed_max ? "local": "system"); 2243 } 2244 2245 static ssize_t 2246 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 2247 { 2248 int max; 2249 char *e; 2250 if (strncmp(buf, "system", 6)==0) { 2251 mddev->sync_speed_max = 0; 2252 return len; 2253 } 2254 max = simple_strtoul(buf, &e, 10); 2255 if (buf == e || (*e && *e != '\n') || max <= 0) 2256 return -EINVAL; 2257 mddev->sync_speed_max = max; 2258 return len; 2259 } 2260 2261 static struct md_sysfs_entry md_sync_max = 2262 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 2263 2264 2265 static ssize_t 2266 sync_speed_show(mddev_t *mddev, char *page) 2267 { 2268 unsigned long resync, dt, db; 2269 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2270 dt = ((jiffies - mddev->resync_mark) / HZ); 2271 if (!dt) dt++; 2272 db = resync - (mddev->resync_mark_cnt); 2273 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */ 2274 } 2275 2276 static struct md_sysfs_entry 2277 md_sync_speed = __ATTR_RO(sync_speed); 2278 2279 static ssize_t 2280 sync_completed_show(mddev_t *mddev, char *page) 2281 { 2282 unsigned long max_blocks, resync; 2283 2284 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2285 max_blocks = mddev->resync_max_sectors; 2286 else 2287 max_blocks = mddev->size << 1; 2288 2289 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2290 return sprintf(page, "%lu / %lu\n", resync, max_blocks); 2291 } 2292 2293 static struct md_sysfs_entry 2294 md_sync_completed = __ATTR_RO(sync_completed); 2295 2296 static struct attribute *md_default_attrs[] = { 2297 &md_level.attr, 2298 &md_raid_disks.attr, 2299 &md_chunk_size.attr, 2300 &md_size.attr, 2301 &md_metadata.attr, 2302 &md_new_device.attr, 2303 NULL, 2304 }; 2305 2306 static struct attribute *md_redundancy_attrs[] = { 2307 &md_scan_mode.attr, 2308 &md_mismatches.attr, 2309 &md_sync_min.attr, 2310 &md_sync_max.attr, 2311 &md_sync_speed.attr, 2312 &md_sync_completed.attr, 2313 NULL, 2314 }; 2315 static struct attribute_group md_redundancy_group = { 2316 .name = NULL, 2317 .attrs = md_redundancy_attrs, 2318 }; 2319 2320 2321 static ssize_t 2322 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2323 { 2324 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2325 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2326 ssize_t rv; 2327 2328 if (!entry->show) 2329 return -EIO; 2330 mddev_lock(mddev); 2331 rv = entry->show(mddev, page); 2332 mddev_unlock(mddev); 2333 return rv; 2334 } 2335 2336 static ssize_t 2337 md_attr_store(struct kobject *kobj, struct attribute *attr, 2338 const char *page, size_t length) 2339 { 2340 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2341 
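/* As in md_attr_show() above: recover the owning mddev from the embedded
 * kobject and call the handler with the mddev locked, so the individual
 * show/store routines do not need any extra locking of their own. */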
mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2342 ssize_t rv; 2343 2344 if (!entry->store) 2345 return -EIO; 2346 mddev_lock(mddev); 2347 rv = entry->store(mddev, page, length); 2348 mddev_unlock(mddev); 2349 return rv; 2350 } 2351 2352 static void md_free(struct kobject *ko) 2353 { 2354 mddev_t *mddev = container_of(ko, mddev_t, kobj); 2355 kfree(mddev); 2356 } 2357 2358 static struct sysfs_ops md_sysfs_ops = { 2359 .show = md_attr_show, 2360 .store = md_attr_store, 2361 }; 2362 static struct kobj_type md_ktype = { 2363 .release = md_free, 2364 .sysfs_ops = &md_sysfs_ops, 2365 .default_attrs = md_default_attrs, 2366 }; 2367 2368 int mdp_major = 0; 2369 2370 static struct kobject *md_probe(dev_t dev, int *part, void *data) 2371 { 2372 static DECLARE_MUTEX(disks_sem); 2373 mddev_t *mddev = mddev_find(dev); 2374 struct gendisk *disk; 2375 int partitioned = (MAJOR(dev) != MD_MAJOR); 2376 int shift = partitioned ? MdpMinorShift : 0; 2377 int unit = MINOR(dev) >> shift; 2378 2379 if (!mddev) 2380 return NULL; 2381 2382 down(&disks_sem); 2383 if (mddev->gendisk) { 2384 up(&disks_sem); 2385 mddev_put(mddev); 2386 return NULL; 2387 } 2388 disk = alloc_disk(1 << shift); 2389 if (!disk) { 2390 up(&disks_sem); 2391 mddev_put(mddev); 2392 return NULL; 2393 } 2394 disk->major = MAJOR(dev); 2395 disk->first_minor = unit << shift; 2396 if (partitioned) { 2397 sprintf(disk->disk_name, "md_d%d", unit); 2398 sprintf(disk->devfs_name, "md/d%d", unit); 2399 } else { 2400 sprintf(disk->disk_name, "md%d", unit); 2401 sprintf(disk->devfs_name, "md/%d", unit); 2402 } 2403 disk->fops = &md_fops; 2404 disk->private_data = mddev; 2405 disk->queue = mddev->queue; 2406 add_disk(disk); 2407 mddev->gendisk = disk; 2408 up(&disks_sem); 2409 mddev->kobj.parent = &disk->kobj; 2410 mddev->kobj.k_name = NULL; 2411 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); 2412 mddev->kobj.ktype = &md_ktype; 2413 kobject_register(&mddev->kobj); 2414 return NULL; 2415 } 2416 2417 void md_wakeup_thread(mdk_thread_t *thread); 2418 2419 static void md_safemode_timeout(unsigned long data) 2420 { 2421 mddev_t *mddev = (mddev_t *) data; 2422 2423 mddev->safemode = 1; 2424 md_wakeup_thread(mddev->thread); 2425 } 2426 2427 static int start_dirty_degraded; 2428 2429 static int do_md_run(mddev_t * mddev) 2430 { 2431 int err; 2432 int chunk_size; 2433 struct list_head *tmp; 2434 mdk_rdev_t *rdev; 2435 struct gendisk *disk; 2436 struct mdk_personality *pers; 2437 char b[BDEVNAME_SIZE]; 2438 2439 if (list_empty(&mddev->disks)) 2440 /* cannot run an array with no devices.. 
*/ 2441 return -EINVAL; 2442 2443 if (mddev->pers) 2444 return -EBUSY; 2445 2446 /* 2447 * Analyze all RAID superblock(s) 2448 */ 2449 if (!mddev->raid_disks) 2450 analyze_sbs(mddev); 2451 2452 chunk_size = mddev->chunk_size; 2453 2454 if (chunk_size) { 2455 if (chunk_size > MAX_CHUNK_SIZE) { 2456 printk(KERN_ERR "too big chunk_size: %d > %d\n", 2457 chunk_size, MAX_CHUNK_SIZE); 2458 return -EINVAL; 2459 } 2460 /* 2461 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE 2462 */ 2463 if ( (1 << ffz(~chunk_size)) != chunk_size) { 2464 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); 2465 return -EINVAL; 2466 } 2467 if (chunk_size < PAGE_SIZE) { 2468 printk(KERN_ERR "too small chunk_size: %d < %ld\n", 2469 chunk_size, PAGE_SIZE); 2470 return -EINVAL; 2471 } 2472 2473 /* devices must have minimum size of one chunk */ 2474 ITERATE_RDEV(mddev,rdev,tmp) { 2475 if (test_bit(Faulty, &rdev->flags)) 2476 continue; 2477 if (rdev->size < chunk_size / 1024) { 2478 printk(KERN_WARNING 2479 "md: Dev %s smaller than chunk_size:" 2480 " %lluk < %dk\n", 2481 bdevname(rdev->bdev,b), 2482 (unsigned long long)rdev->size, 2483 chunk_size / 1024); 2484 return -EINVAL; 2485 } 2486 } 2487 } 2488 2489 #ifdef CONFIG_KMOD 2490 if (mddev->level != LEVEL_NONE) 2491 request_module("md-level-%d", mddev->level); 2492 else if (mddev->clevel[0]) 2493 request_module("md-%s", mddev->clevel); 2494 #endif 2495 2496 /* 2497 * Drop all container device buffers, from now on 2498 * the only valid external interface is through the md 2499 * device. 2500 * Also find largest hardsector size 2501 */ 2502 ITERATE_RDEV(mddev,rdev,tmp) { 2503 if (test_bit(Faulty, &rdev->flags)) 2504 continue; 2505 sync_blockdev(rdev->bdev); 2506 invalidate_bdev(rdev->bdev, 0); 2507 } 2508 2509 md_probe(mddev->unit, NULL, NULL); 2510 disk = mddev->gendisk; 2511 if (!disk) 2512 return -ENOMEM; 2513 2514 spin_lock(&pers_lock); 2515 pers = find_pers(mddev->level, mddev->clevel); 2516 if (!pers || !try_module_get(pers->owner)) { 2517 spin_unlock(&pers_lock); 2518 if (mddev->level != LEVEL_NONE) 2519 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 2520 mddev->level); 2521 else 2522 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 2523 mddev->clevel); 2524 return -EINVAL; 2525 } 2526 mddev->pers = pers; 2527 spin_unlock(&pers_lock); 2528 mddev->level = pers->level; 2529 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2530 2531 mddev->recovery = 0; 2532 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2533 mddev->barriers_work = 1; 2534 mddev->ok_start_degraded = start_dirty_degraded; 2535 2536 if (start_readonly) 2537 mddev->ro = 2; /* read-only, but switch on first write */ 2538 2539 err = mddev->pers->run(mddev); 2540 if (!err && mddev->pers->sync_request) { 2541 err = bitmap_create(mddev); 2542 if (err) { 2543 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 2544 mdname(mddev), err); 2545 mddev->pers->stop(mddev); 2546 } 2547 } 2548 if (err) { 2549 printk(KERN_ERR "md: pers->run() failed ...\n"); 2550 module_put(mddev->pers->owner); 2551 mddev->pers = NULL; 2552 bitmap_destroy(mddev); 2553 return err; 2554 } 2555 if (mddev->pers->sync_request) 2556 sysfs_create_group(&mddev->kobj, &md_redundancy_group); 2557 else if (mddev->ro == 2) /* auto-readonly not meaningful */ 2558 mddev->ro = 0; 2559 2560 atomic_set(&mddev->writes_pending,0); 2561 mddev->safemode = 0; 2562 mddev->safemode_timer.function = md_safemode_timeout; 2563 
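/* Set up (but do not yet arm) the safemode timer: md_safemode_timeout()
 * above just flags mddev->safemode and wakes the md thread. The timer is
 * re-armed from md_write_end() whenever outstanding writes drain, using
 * the delay configured just below - (20 * HZ)/1000 + 1 is 21 jiffies at
 * HZ=1000 and 6 at HZ=250, i.e. roughly the intended 20 msec. */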
mddev->safemode_timer.data = (unsigned long) mddev; 2564 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ 2565 mddev->in_sync = 1; 2566 2567 ITERATE_RDEV(mddev,rdev,tmp) 2568 if (rdev->raid_disk >= 0) { 2569 char nm[20]; 2570 sprintf(nm, "rd%d", rdev->raid_disk); 2571 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 2572 } 2573 2574 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2575 md_wakeup_thread(mddev->thread); 2576 2577 if (mddev->sb_dirty) 2578 md_update_sb(mddev); 2579 2580 set_capacity(disk, mddev->array_size<<1); 2581 2582 /* If we call blk_queue_make_request here, it will 2583 * re-initialise max_sectors etc which may have been 2584 * refined inside -> run. So just set the bits we need to set. 2585 * Most initialisation happended when we called 2586 * blk_queue_make_request(..., md_fail_request) 2587 * earlier. 2588 */ 2589 mddev->queue->queuedata = mddev; 2590 mddev->queue->make_request_fn = mddev->pers->make_request; 2591 2592 mddev->changed = 1; 2593 md_new_event(mddev); 2594 return 0; 2595 } 2596 2597 static int restart_array(mddev_t *mddev) 2598 { 2599 struct gendisk *disk = mddev->gendisk; 2600 int err; 2601 2602 /* 2603 * Complain if it has no devices 2604 */ 2605 err = -ENXIO; 2606 if (list_empty(&mddev->disks)) 2607 goto out; 2608 2609 if (mddev->pers) { 2610 err = -EBUSY; 2611 if (!mddev->ro) 2612 goto out; 2613 2614 mddev->safemode = 0; 2615 mddev->ro = 0; 2616 set_disk_ro(disk, 0); 2617 2618 printk(KERN_INFO "md: %s switched to read-write mode.\n", 2619 mdname(mddev)); 2620 /* 2621 * Kick recovery or resync if necessary 2622 */ 2623 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2624 md_wakeup_thread(mddev->thread); 2625 err = 0; 2626 } else { 2627 printk(KERN_ERR "md: %s has no personality assigned.\n", 2628 mdname(mddev)); 2629 err = -EINVAL; 2630 } 2631 2632 out: 2633 return err; 2634 } 2635 2636 static int do_md_stop(mddev_t * mddev, int ro) 2637 { 2638 int err = 0; 2639 struct gendisk *disk = mddev->gendisk; 2640 2641 if (mddev->pers) { 2642 if (atomic_read(&mddev->active)>2) { 2643 printk("md: %s still in use.\n",mdname(mddev)); 2644 return -EBUSY; 2645 } 2646 2647 if (mddev->sync_thread) { 2648 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2649 md_unregister_thread(mddev->sync_thread); 2650 mddev->sync_thread = NULL; 2651 } 2652 2653 del_timer_sync(&mddev->safemode_timer); 2654 2655 invalidate_partition(disk, 0); 2656 2657 if (ro) { 2658 err = -ENXIO; 2659 if (mddev->ro==1) 2660 goto out; 2661 mddev->ro = 1; 2662 } else { 2663 bitmap_flush(mddev); 2664 md_super_wait(mddev); 2665 if (mddev->ro) 2666 set_disk_ro(disk, 0); 2667 blk_queue_make_request(mddev->queue, md_fail_request); 2668 mddev->pers->stop(mddev); 2669 if (mddev->pers->sync_request) 2670 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 2671 2672 module_put(mddev->pers->owner); 2673 mddev->pers = NULL; 2674 if (mddev->ro) 2675 mddev->ro = 0; 2676 } 2677 if (!mddev->in_sync) { 2678 /* mark array as shutdown cleanly */ 2679 mddev->in_sync = 1; 2680 md_update_sb(mddev); 2681 } 2682 if (ro) 2683 set_disk_ro(disk, 1); 2684 } 2685 2686 bitmap_destroy(mddev); 2687 if (mddev->bitmap_file) { 2688 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1); 2689 fput(mddev->bitmap_file); 2690 mddev->bitmap_file = NULL; 2691 } 2692 mddev->bitmap_offset = 0; 2693 2694 /* 2695 * Free resources if final stop 2696 */ 2697 if (!ro) { 2698 mdk_rdev_t *rdev; 2699 struct list_head *tmp; 2700 struct gendisk *disk; 2701 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 2702 2703 
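/* Final (non-read-only) stop: undo what do_md_run() set up - drop the
 * per-device rd%d sysfs links, release every component device via
 * export_array() and zero the exported capacity so the array appears
 * empty and inactive. */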
ITERATE_RDEV(mddev,rdev,tmp) 2704 if (rdev->raid_disk >= 0) { 2705 char nm[20]; 2706 sprintf(nm, "rd%d", rdev->raid_disk); 2707 sysfs_remove_link(&mddev->kobj, nm); 2708 } 2709 2710 export_array(mddev); 2711 2712 mddev->array_size = 0; 2713 disk = mddev->gendisk; 2714 if (disk) 2715 set_capacity(disk, 0); 2716 mddev->changed = 1; 2717 } else 2718 printk(KERN_INFO "md: %s switched to read-only mode.\n", 2719 mdname(mddev)); 2720 err = 0; 2721 md_new_event(mddev); 2722 out: 2723 return err; 2724 } 2725 2726 static void autorun_array(mddev_t *mddev) 2727 { 2728 mdk_rdev_t *rdev; 2729 struct list_head *tmp; 2730 int err; 2731 2732 if (list_empty(&mddev->disks)) 2733 return; 2734 2735 printk(KERN_INFO "md: running: "); 2736 2737 ITERATE_RDEV(mddev,rdev,tmp) { 2738 char b[BDEVNAME_SIZE]; 2739 printk("<%s>", bdevname(rdev->bdev,b)); 2740 } 2741 printk("\n"); 2742 2743 err = do_md_run (mddev); 2744 if (err) { 2745 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 2746 do_md_stop (mddev, 0); 2747 } 2748 } 2749 2750 /* 2751 * lets try to run arrays based on all disks that have arrived 2752 * until now. (those are in pending_raid_disks) 2753 * 2754 * the method: pick the first pending disk, collect all disks with 2755 * the same UUID, remove all from the pending list and put them into 2756 * the 'same_array' list. Then order this list based on superblock 2757 * update time (freshest comes first), kick out 'old' disks and 2758 * compare superblocks. If everything's fine then run it. 2759 * 2760 * If "unit" is allocated, then bump its reference count 2761 */ 2762 static void autorun_devices(int part) 2763 { 2764 struct list_head candidates; 2765 struct list_head *tmp; 2766 mdk_rdev_t *rdev0, *rdev; 2767 mddev_t *mddev; 2768 char b[BDEVNAME_SIZE]; 2769 2770 printk(KERN_INFO "md: autorun ...\n"); 2771 while (!list_empty(&pending_raid_disks)) { 2772 dev_t dev; 2773 rdev0 = list_entry(pending_raid_disks.next, 2774 mdk_rdev_t, same_set); 2775 2776 printk(KERN_INFO "md: considering %s ...\n", 2777 bdevname(rdev0->bdev,b)); 2778 INIT_LIST_HEAD(&candidates); 2779 ITERATE_RDEV_PENDING(rdev,tmp) 2780 if (super_90_load(rdev, rdev0, 0) >= 0) { 2781 printk(KERN_INFO "md: adding %s ...\n", 2782 bdevname(rdev->bdev,b)); 2783 list_move(&rdev->same_set, &candidates); 2784 } 2785 /* 2786 * now we have a set of devices, with all of them having 2787 * mostly sane superblocks. It's time to allocate the 2788 * mddev. 
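* The unit to use comes from rdev0->preferred_minor: partitioned (mdp)
* arrays shift it left by MdpMinorShift when building the dev_t, plain
* md arrays use it directly as the minor number.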
2789 */ 2790 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) { 2791 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 2792 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 2793 break; 2794 } 2795 if (part) 2796 dev = MKDEV(mdp_major, 2797 rdev0->preferred_minor << MdpMinorShift); 2798 else 2799 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 2800 2801 md_probe(dev, NULL, NULL); 2802 mddev = mddev_find(dev); 2803 if (!mddev) { 2804 printk(KERN_ERR 2805 "md: cannot allocate memory for md drive.\n"); 2806 break; 2807 } 2808 if (mddev_lock(mddev)) 2809 printk(KERN_WARNING "md: %s locked, cannot run\n", 2810 mdname(mddev)); 2811 else if (mddev->raid_disks || mddev->major_version 2812 || !list_empty(&mddev->disks)) { 2813 printk(KERN_WARNING 2814 "md: %s already running, cannot run %s\n", 2815 mdname(mddev), bdevname(rdev0->bdev,b)); 2816 mddev_unlock(mddev); 2817 } else { 2818 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 2819 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) { 2820 list_del_init(&rdev->same_set); 2821 if (bind_rdev_to_array(rdev, mddev)) 2822 export_rdev(rdev); 2823 } 2824 autorun_array(mddev); 2825 mddev_unlock(mddev); 2826 } 2827 /* on success, candidates will be empty, on error 2828 * it won't... 2829 */ 2830 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) 2831 export_rdev(rdev); 2832 mddev_put(mddev); 2833 } 2834 printk(KERN_INFO "md: ... autorun DONE.\n"); 2835 } 2836 2837 /* 2838 * import RAID devices based on one partition 2839 * if possible, the array gets run as well. 2840 */ 2841 2842 static int autostart_array(dev_t startdev) 2843 { 2844 char b[BDEVNAME_SIZE]; 2845 int err = -EINVAL, i; 2846 mdp_super_t *sb = NULL; 2847 mdk_rdev_t *start_rdev = NULL, *rdev; 2848 2849 start_rdev = md_import_device(startdev, 0, 0); 2850 if (IS_ERR(start_rdev)) 2851 return err; 2852 2853 2854 /* NOTE: this can only work for 0.90.0 superblocks */ 2855 sb = (mdp_super_t*)page_address(start_rdev->sb_page); 2856 if (sb->major_version != 0 || 2857 sb->minor_version != 90 ) { 2858 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n"); 2859 export_rdev(start_rdev); 2860 return err; 2861 } 2862 2863 if (test_bit(Faulty, &start_rdev->flags)) { 2864 printk(KERN_WARNING 2865 "md: can not autostart based on faulty %s!\n", 2866 bdevname(start_rdev->bdev,b)); 2867 export_rdev(start_rdev); 2868 return err; 2869 } 2870 list_add(&start_rdev->same_set, &pending_raid_disks); 2871 2872 for (i = 0; i < MD_SB_DISKS; i++) { 2873 mdp_disk_t *desc = sb->disks + i; 2874 dev_t dev = MKDEV(desc->major, desc->minor); 2875 2876 if (!dev) 2877 continue; 2878 if (dev == startdev) 2879 continue; 2880 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor) 2881 continue; 2882 rdev = md_import_device(dev, 0, 0); 2883 if (IS_ERR(rdev)) 2884 continue; 2885 2886 list_add(&rdev->same_set, &pending_raid_disks); 2887 } 2888 2889 /* 2890 * possibly return codes 2891 */ 2892 autorun_devices(0); 2893 return 0; 2894 2895 } 2896 2897 2898 static int get_version(void __user * arg) 2899 { 2900 mdu_version_t ver; 2901 2902 ver.major = MD_MAJOR_VERSION; 2903 ver.minor = MD_MINOR_VERSION; 2904 ver.patchlevel = MD_PATCHLEVEL_VERSION; 2905 2906 if (copy_to_user(arg, &ver, sizeof(ver))) 2907 return -EFAULT; 2908 2909 return 0; 2910 } 2911 2912 static int get_array_info(mddev_t * mddev, void __user * arg) 2913 { 2914 mdu_array_info_t info; 2915 int nr,working,active,failed,spare; 2916 mdk_rdev_t *rdev; 2917 struct list_head *tmp; 2918 2919 nr=working=active=failed=spare=0; 2920 
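/* Classify the component devices: anything Faulty counts as failed;
 * working devices are active if In_sync and spare otherwise, while nr
 * counts every rdev currently attached to the array. */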
ITERATE_RDEV(mddev,rdev,tmp) { 2921 nr++; 2922 if (test_bit(Faulty, &rdev->flags)) 2923 failed++; 2924 else { 2925 working++; 2926 if (test_bit(In_sync, &rdev->flags)) 2927 active++; 2928 else 2929 spare++; 2930 } 2931 } 2932 2933 info.major_version = mddev->major_version; 2934 info.minor_version = mddev->minor_version; 2935 info.patch_version = MD_PATCHLEVEL_VERSION; 2936 info.ctime = mddev->ctime; 2937 info.level = mddev->level; 2938 info.size = mddev->size; 2939 info.nr_disks = nr; 2940 info.raid_disks = mddev->raid_disks; 2941 info.md_minor = mddev->md_minor; 2942 info.not_persistent= !mddev->persistent; 2943 2944 info.utime = mddev->utime; 2945 info.state = 0; 2946 if (mddev->in_sync) 2947 info.state = (1<<MD_SB_CLEAN); 2948 if (mddev->bitmap && mddev->bitmap_offset) 2949 info.state = (1<<MD_SB_BITMAP_PRESENT); 2950 info.active_disks = active; 2951 info.working_disks = working; 2952 info.failed_disks = failed; 2953 info.spare_disks = spare; 2954 2955 info.layout = mddev->layout; 2956 info.chunk_size = mddev->chunk_size; 2957 2958 if (copy_to_user(arg, &info, sizeof(info))) 2959 return -EFAULT; 2960 2961 return 0; 2962 } 2963 2964 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 2965 { 2966 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 2967 char *ptr, *buf = NULL; 2968 int err = -ENOMEM; 2969 2970 file = kmalloc(sizeof(*file), GFP_KERNEL); 2971 if (!file) 2972 goto out; 2973 2974 /* bitmap disabled, zero the first byte and copy out */ 2975 if (!mddev->bitmap || !mddev->bitmap->file) { 2976 file->pathname[0] = '\0'; 2977 goto copy_out; 2978 } 2979 2980 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 2981 if (!buf) 2982 goto out; 2983 2984 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname)); 2985 if (!ptr) 2986 goto out; 2987 2988 strcpy(file->pathname, ptr); 2989 2990 copy_out: 2991 err = 0; 2992 if (copy_to_user(arg, file, sizeof(*file))) 2993 err = -EFAULT; 2994 out: 2995 kfree(buf); 2996 kfree(file); 2997 return err; 2998 } 2999 3000 static int get_disk_info(mddev_t * mddev, void __user * arg) 3001 { 3002 mdu_disk_info_t info; 3003 unsigned int nr; 3004 mdk_rdev_t *rdev; 3005 3006 if (copy_from_user(&info, arg, sizeof(info))) 3007 return -EFAULT; 3008 3009 nr = info.number; 3010 3011 rdev = find_rdev_nr(mddev, nr); 3012 if (rdev) { 3013 info.major = MAJOR(rdev->bdev->bd_dev); 3014 info.minor = MINOR(rdev->bdev->bd_dev); 3015 info.raid_disk = rdev->raid_disk; 3016 info.state = 0; 3017 if (test_bit(Faulty, &rdev->flags)) 3018 info.state |= (1<<MD_DISK_FAULTY); 3019 else if (test_bit(In_sync, &rdev->flags)) { 3020 info.state |= (1<<MD_DISK_ACTIVE); 3021 info.state |= (1<<MD_DISK_SYNC); 3022 } 3023 if (test_bit(WriteMostly, &rdev->flags)) 3024 info.state |= (1<<MD_DISK_WRITEMOSTLY); 3025 } else { 3026 info.major = info.minor = 0; 3027 info.raid_disk = -1; 3028 info.state = (1<<MD_DISK_REMOVED); 3029 } 3030 3031 if (copy_to_user(arg, &info, sizeof(info))) 3032 return -EFAULT; 3033 3034 return 0; 3035 } 3036 3037 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 3038 { 3039 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 3040 mdk_rdev_t *rdev; 3041 dev_t dev = MKDEV(info->major,info->minor); 3042 3043 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 3044 return -EOVERFLOW; 3045 3046 if (!mddev->raid_disks) { 3047 int err; 3048 /* expecting a device which has a superblock */ 3049 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 3050 if (IS_ERR(rdev)) { 3051 printk(KERN_WARNING 3052 "md: 
md_import_device returned %ld\n", 3053 PTR_ERR(rdev)); 3054 return PTR_ERR(rdev); 3055 } 3056 if (!list_empty(&mddev->disks)) { 3057 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3058 mdk_rdev_t, same_set); 3059 int err = super_types[mddev->major_version] 3060 .load_super(rdev, rdev0, mddev->minor_version); 3061 if (err < 0) { 3062 printk(KERN_WARNING 3063 "md: %s has different UUID to %s\n", 3064 bdevname(rdev->bdev,b), 3065 bdevname(rdev0->bdev,b2)); 3066 export_rdev(rdev); 3067 return -EINVAL; 3068 } 3069 } 3070 err = bind_rdev_to_array(rdev, mddev); 3071 if (err) 3072 export_rdev(rdev); 3073 return err; 3074 } 3075 3076 /* 3077 * add_new_disk can be used once the array is assembled 3078 * to add "hot spares". They must already have a superblock 3079 * written 3080 */ 3081 if (mddev->pers) { 3082 int err; 3083 if (!mddev->pers->hot_add_disk) { 3084 printk(KERN_WARNING 3085 "%s: personality does not support diskops!\n", 3086 mdname(mddev)); 3087 return -EINVAL; 3088 } 3089 if (mddev->persistent) 3090 rdev = md_import_device(dev, mddev->major_version, 3091 mddev->minor_version); 3092 else 3093 rdev = md_import_device(dev, -1, -1); 3094 if (IS_ERR(rdev)) { 3095 printk(KERN_WARNING 3096 "md: md_import_device returned %ld\n", 3097 PTR_ERR(rdev)); 3098 return PTR_ERR(rdev); 3099 } 3100 /* set save_raid_disk if appropriate */ 3101 if (!mddev->persistent) { 3102 if (info->state & (1<<MD_DISK_SYNC) && 3103 info->raid_disk < mddev->raid_disks) 3104 rdev->raid_disk = info->raid_disk; 3105 else 3106 rdev->raid_disk = -1; 3107 } else 3108 super_types[mddev->major_version]. 3109 validate_super(mddev, rdev); 3110 rdev->saved_raid_disk = rdev->raid_disk; 3111 3112 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 3113 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3114 set_bit(WriteMostly, &rdev->flags); 3115 3116 rdev->raid_disk = -1; 3117 err = bind_rdev_to_array(rdev, mddev); 3118 if (err) 3119 export_rdev(rdev); 3120 3121 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3122 md_wakeup_thread(mddev->thread); 3123 return err; 3124 } 3125 3126 /* otherwise, add_new_disk is only allowed 3127 * for major_version==0 superblocks 3128 */ 3129 if (mddev->major_version != 0) { 3130 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 3131 mdname(mddev)); 3132 return -EINVAL; 3133 } 3134 3135 if (!(info->state & (1<<MD_DISK_FAULTY))) { 3136 int err; 3137 rdev = md_import_device (dev, -1, 0); 3138 if (IS_ERR(rdev)) { 3139 printk(KERN_WARNING 3140 "md: error, md_import_device() returned %ld\n", 3141 PTR_ERR(rdev)); 3142 return PTR_ERR(rdev); 3143 } 3144 rdev->desc_nr = info->number; 3145 if (info->raid_disk < mddev->raid_disks) 3146 rdev->raid_disk = info->raid_disk; 3147 else 3148 rdev->raid_disk = -1; 3149 3150 rdev->flags = 0; 3151 3152 if (rdev->raid_disk < mddev->raid_disks) 3153 if (info->state & (1<<MD_DISK_SYNC)) 3154 set_bit(In_sync, &rdev->flags); 3155 3156 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3157 set_bit(WriteMostly, &rdev->flags); 3158 3159 if (!mddev->persistent) { 3160 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 3161 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3162 } else 3163 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3164 rdev->size = calc_dev_size(rdev, mddev->chunk_size); 3165 3166 err = bind_rdev_to_array(rdev, mddev); 3167 if (err) { 3168 export_rdev(rdev); 3169 return err; 3170 } 3171 } 3172 3173 return 0; 3174 } 3175 3176 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 3177 { 3178 char b[BDEVNAME_SIZE]; 3179 mdk_rdev_t *rdev; 
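/* Only devices that are not active members of the array (raid_disk < 0,
 * i.e. failed or spare devices) may be removed this way; an in-use slot
 * is reported as busy further down. */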
3180 3181 if (!mddev->pers) 3182 return -ENODEV; 3183 3184 rdev = find_rdev(mddev, dev); 3185 if (!rdev) 3186 return -ENXIO; 3187 3188 if (rdev->raid_disk >= 0) 3189 goto busy; 3190 3191 kick_rdev_from_array(rdev); 3192 md_update_sb(mddev); 3193 md_new_event(mddev); 3194 3195 return 0; 3196 busy: 3197 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n", 3198 bdevname(rdev->bdev,b), mdname(mddev)); 3199 return -EBUSY; 3200 } 3201 3202 static int hot_add_disk(mddev_t * mddev, dev_t dev) 3203 { 3204 char b[BDEVNAME_SIZE]; 3205 int err; 3206 unsigned int size; 3207 mdk_rdev_t *rdev; 3208 3209 if (!mddev->pers) 3210 return -ENODEV; 3211 3212 if (mddev->major_version != 0) { 3213 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 3214 " version-0 superblocks.\n", 3215 mdname(mddev)); 3216 return -EINVAL; 3217 } 3218 if (!mddev->pers->hot_add_disk) { 3219 printk(KERN_WARNING 3220 "%s: personality does not support diskops!\n", 3221 mdname(mddev)); 3222 return -EINVAL; 3223 } 3224 3225 rdev = md_import_device (dev, -1, 0); 3226 if (IS_ERR(rdev)) { 3227 printk(KERN_WARNING 3228 "md: error, md_import_device() returned %ld\n", 3229 PTR_ERR(rdev)); 3230 return -EINVAL; 3231 } 3232 3233 if (mddev->persistent) 3234 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3235 else 3236 rdev->sb_offset = 3237 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3238 3239 size = calc_dev_size(rdev, mddev->chunk_size); 3240 rdev->size = size; 3241 3242 if (test_bit(Faulty, &rdev->flags)) { 3243 printk(KERN_WARNING 3244 "md: can not hot-add faulty %s disk to %s!\n", 3245 bdevname(rdev->bdev,b), mdname(mddev)); 3246 err = -EINVAL; 3247 goto abort_export; 3248 } 3249 clear_bit(In_sync, &rdev->flags); 3250 rdev->desc_nr = -1; 3251 err = bind_rdev_to_array(rdev, mddev); 3252 if (err) 3253 goto abort_export; 3254 3255 /* 3256 * The rest should better be atomic, we can have disk failures 3257 * noticed in interrupt contexts ... 3258 */ 3259 3260 if (rdev->desc_nr == mddev->max_disks) { 3261 printk(KERN_WARNING "%s: can not hot-add to full array!\n", 3262 mdname(mddev)); 3263 err = -EBUSY; 3264 goto abort_unbind_export; 3265 } 3266 3267 rdev->raid_disk = -1; 3268 3269 md_update_sb(mddev); 3270 3271 /* 3272 * Kick recovery, maybe this spare has to be added to the 3273 * array immediately. 3274 */ 3275 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3276 md_wakeup_thread(mddev->thread); 3277 md_new_event(mddev); 3278 return 0; 3279 3280 abort_unbind_export: 3281 unbind_rdev_from_array(rdev); 3282 3283 abort_export: 3284 export_rdev(rdev); 3285 return err; 3286 } 3287 3288 /* similar to deny_write_access, but accounts for our holding a reference 3289 * to the file ourselves */ 3290 static int deny_bitmap_write_access(struct file * file) 3291 { 3292 struct inode *inode = file->f_mapping->host; 3293 3294 spin_lock(&inode->i_lock); 3295 if (atomic_read(&inode->i_writecount) > 1) { 3296 spin_unlock(&inode->i_lock); 3297 return -ETXTBSY; 3298 } 3299 atomic_set(&inode->i_writecount, -1); 3300 spin_unlock(&inode->i_lock); 3301 3302 return 0; 3303 } 3304 3305 static int set_bitmap_file(mddev_t *mddev, int fd) 3306 { 3307 int err; 3308 3309 if (mddev->pers) { 3310 if (!mddev->pers->quiesce) 3311 return -EBUSY; 3312 if (mddev->recovery || mddev->sync_thread) 3313 return -EBUSY; 3314 /* we should be able to change the bitmap.. 
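* A non-negative fd attaches a new file-backed bitmap (the file is pinned
* with fget() and write access to it is denied below), fd < 0 detaches the
* current one; if the array is running, the personality is quiesced around
* the switch.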
*/
3315 }
3316
3317
3318 if (fd >= 0) {
3319 if (mddev->bitmap)
3320 return -EEXIST; /* cannot add when bitmap is present */
3321 mddev->bitmap_file = fget(fd);
3322
3323 if (mddev->bitmap_file == NULL) {
3324 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3325 mdname(mddev));
3326 return -EBADF;
3327 }
3328
3329 err = deny_bitmap_write_access(mddev->bitmap_file);
3330 if (err) {
3331 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3332 mdname(mddev));
3333 fput(mddev->bitmap_file);
3334 mddev->bitmap_file = NULL;
3335 return err;
3336 }
3337 mddev->bitmap_offset = 0; /* file overrides offset */
3338 } else if (mddev->bitmap == NULL)
3339 return -ENOENT; /* cannot remove what isn't there */
3340 err = 0;
3341 if (mddev->pers) {
3342 mddev->pers->quiesce(mddev, 1);
3343 if (fd >= 0)
3344 err = bitmap_create(mddev);
3345 if (fd < 0 || err)
3346 bitmap_destroy(mddev);
3347 mddev->pers->quiesce(mddev, 0);
3348 } else if (fd < 0) {
3349 if (mddev->bitmap_file)
3350 fput(mddev->bitmap_file);
3351 mddev->bitmap_file = NULL;
3352 }
3353
3354 return err;
3355 }
3356
3357 /*
3358 * set_array_info is used in two different ways.
3359 * The original usage is when creating a new array.
3360 * In this usage, raid_disks is > 0 and it together with
3361 * level, size, not_persistent, layout, chunksize determine the
3362 * shape of the array.
3363 * This will always create an array with a type-0.90.0 superblock.
3364 * The newer usage is when assembling an array.
3365 * In this case raid_disks will be 0, and the major_version field is
3366 * used to determine which style super-blocks are to be found on the devices.
3367 * The minor and patch _version numbers are also kept in case the
3368 * super_block handler wishes to interpret them.
3369 */
3370 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3371 {
3372
3373 if (info->raid_disks == 0) {
3374 /* just setting version number for superblock loading */
3375 if (info->major_version < 0 ||
3376 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3377 super_types[info->major_version].name == NULL) {
3378 /* maybe try to auto-load a module? */
3379 printk(KERN_INFO
3380 "md: superblock version %d not known\n",
3381 info->major_version);
3382 return -EINVAL;
3383 }
3384 mddev->major_version = info->major_version;
3385 mddev->minor_version = info->minor_version;
3386 mddev->patch_version = info->patch_version;
3387 return 0;
3388 }
3389 mddev->major_version = MD_MAJOR_VERSION;
3390 mddev->minor_version = MD_MINOR_VERSION;
3391 mddev->patch_version = MD_PATCHLEVEL_VERSION;
3392 mddev->ctime = get_seconds();
3393
3394 mddev->level = info->level;
3395 mddev->size = info->size;
3396 mddev->raid_disks = info->raid_disks;
3397 /* don't set md_minor, it is determined by which /dev/md* was
3398 * opened
3399 */
3400 if (info->state & (1<<MD_SB_CLEAN))
3401 mddev->recovery_cp = MaxSector;
3402 else
3403 mddev->recovery_cp = 0;
3404 mddev->persistent = !
info->not_persistent; 3405 3406 mddev->layout = info->layout; 3407 mddev->chunk_size = info->chunk_size; 3408 3409 mddev->max_disks = MD_SB_DISKS; 3410 3411 mddev->sb_dirty = 1; 3412 3413 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 3414 mddev->bitmap_offset = 0; 3415 3416 /* 3417 * Generate a 128 bit UUID 3418 */ 3419 get_random_bytes(mddev->uuid, 16); 3420 3421 return 0; 3422 } 3423 3424 static int update_size(mddev_t *mddev, unsigned long size) 3425 { 3426 mdk_rdev_t * rdev; 3427 int rv; 3428 struct list_head *tmp; 3429 3430 if (mddev->pers->resize == NULL) 3431 return -EINVAL; 3432 /* The "size" is the amount of each device that is used. 3433 * This can only make sense for arrays with redundancy. 3434 * linear and raid0 always use whatever space is available 3435 * We can only consider changing the size if no resync 3436 * or reconstruction is happening, and if the new size 3437 * is acceptable. It must fit before the sb_offset or, 3438 * if that is <data_offset, it must fit before the 3439 * size of each device. 3440 * If size is zero, we find the largest size that fits. 3441 */ 3442 if (mddev->sync_thread) 3443 return -EBUSY; 3444 ITERATE_RDEV(mddev,rdev,tmp) { 3445 sector_t avail; 3446 int fit = (size == 0); 3447 if (rdev->sb_offset > rdev->data_offset) 3448 avail = (rdev->sb_offset*2) - rdev->data_offset; 3449 else 3450 avail = get_capacity(rdev->bdev->bd_disk) 3451 - rdev->data_offset; 3452 if (fit && (size == 0 || size > avail/2)) 3453 size = avail/2; 3454 if (avail < ((sector_t)size << 1)) 3455 return -ENOSPC; 3456 } 3457 rv = mddev->pers->resize(mddev, (sector_t)size *2); 3458 if (!rv) { 3459 struct block_device *bdev; 3460 3461 bdev = bdget_disk(mddev->gendisk, 0); 3462 if (bdev) { 3463 mutex_lock(&bdev->bd_inode->i_mutex); 3464 i_size_write(bdev->bd_inode, mddev->array_size << 10); 3465 mutex_unlock(&bdev->bd_inode->i_mutex); 3466 bdput(bdev); 3467 } 3468 } 3469 return rv; 3470 } 3471 3472 static int update_raid_disks(mddev_t *mddev, int raid_disks) 3473 { 3474 int rv; 3475 /* change the number of raid disks */ 3476 if (mddev->pers->reshape == NULL) 3477 return -EINVAL; 3478 if (raid_disks <= 0 || 3479 raid_disks >= mddev->max_disks) 3480 return -EINVAL; 3481 if (mddev->sync_thread) 3482 return -EBUSY; 3483 rv = mddev->pers->reshape(mddev, raid_disks); 3484 if (!rv) { 3485 struct block_device *bdev; 3486 3487 bdev = bdget_disk(mddev->gendisk, 0); 3488 if (bdev) { 3489 mutex_lock(&bdev->bd_inode->i_mutex); 3490 i_size_write(bdev->bd_inode, mddev->array_size << 10); 3491 mutex_unlock(&bdev->bd_inode->i_mutex); 3492 bdput(bdev); 3493 } 3494 } 3495 return rv; 3496 } 3497 3498 3499 /* 3500 * update_array_info is used to change the configuration of an 3501 * on-line array. 3502 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 3503 * fields in the info are checked against the array. 3504 * Any differences that cannot be handled will cause an error. 3505 * Normally, only one change can be managed at a time. 
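* For example, a request that changes both size and raid_disks in one call
* is rejected with -EINVAL, while toggling MD_SB_BITMAP_PRESENT on its own
* is accepted provided the personality supports ->quiesce and no resync or
* recovery is currently running.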
3506 */ 3507 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 3508 { 3509 int rv = 0; 3510 int cnt = 0; 3511 int state = 0; 3512 3513 /* calculate expected state,ignoring low bits */ 3514 if (mddev->bitmap && mddev->bitmap_offset) 3515 state |= (1 << MD_SB_BITMAP_PRESENT); 3516 3517 if (mddev->major_version != info->major_version || 3518 mddev->minor_version != info->minor_version || 3519 /* mddev->patch_version != info->patch_version || */ 3520 mddev->ctime != info->ctime || 3521 mddev->level != info->level || 3522 /* mddev->layout != info->layout || */ 3523 !mddev->persistent != info->not_persistent|| 3524 mddev->chunk_size != info->chunk_size || 3525 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 3526 ((state^info->state) & 0xfffffe00) 3527 ) 3528 return -EINVAL; 3529 /* Check there is only one change */ 3530 if (mddev->size != info->size) cnt++; 3531 if (mddev->raid_disks != info->raid_disks) cnt++; 3532 if (mddev->layout != info->layout) cnt++; 3533 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; 3534 if (cnt == 0) return 0; 3535 if (cnt > 1) return -EINVAL; 3536 3537 if (mddev->layout != info->layout) { 3538 /* Change layout 3539 * we don't need to do anything at the md level, the 3540 * personality will take care of it all. 3541 */ 3542 if (mddev->pers->reconfig == NULL) 3543 return -EINVAL; 3544 else 3545 return mddev->pers->reconfig(mddev, info->layout, -1); 3546 } 3547 if (mddev->size != info->size) 3548 rv = update_size(mddev, info->size); 3549 3550 if (mddev->raid_disks != info->raid_disks) 3551 rv = update_raid_disks(mddev, info->raid_disks); 3552 3553 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 3554 if (mddev->pers->quiesce == NULL) 3555 return -EINVAL; 3556 if (mddev->recovery || mddev->sync_thread) 3557 return -EBUSY; 3558 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 3559 /* add the bitmap */ 3560 if (mddev->bitmap) 3561 return -EEXIST; 3562 if (mddev->default_bitmap_offset == 0) 3563 return -EINVAL; 3564 mddev->bitmap_offset = mddev->default_bitmap_offset; 3565 mddev->pers->quiesce(mddev, 1); 3566 rv = bitmap_create(mddev); 3567 if (rv) 3568 bitmap_destroy(mddev); 3569 mddev->pers->quiesce(mddev, 0); 3570 } else { 3571 /* remove the bitmap */ 3572 if (!mddev->bitmap) 3573 return -ENOENT; 3574 if (mddev->bitmap->file) 3575 return -EINVAL; 3576 mddev->pers->quiesce(mddev, 1); 3577 bitmap_destroy(mddev); 3578 mddev->pers->quiesce(mddev, 0); 3579 mddev->bitmap_offset = 0; 3580 } 3581 } 3582 md_update_sb(mddev); 3583 return rv; 3584 } 3585 3586 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 3587 { 3588 mdk_rdev_t *rdev; 3589 3590 if (mddev->pers == NULL) 3591 return -ENODEV; 3592 3593 rdev = find_rdev(mddev, dev); 3594 if (!rdev) 3595 return -ENODEV; 3596 3597 md_error(mddev, rdev); 3598 return 0; 3599 } 3600 3601 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3602 { 3603 mddev_t *mddev = bdev->bd_disk->private_data; 3604 3605 geo->heads = 2; 3606 geo->sectors = 4; 3607 geo->cylinders = get_capacity(mddev->gendisk) / 8; 3608 return 0; 3609 } 3610 3611 static int md_ioctl(struct inode *inode, struct file *file, 3612 unsigned int cmd, unsigned long arg) 3613 { 3614 int err = 0; 3615 void __user *argp = (void __user *)arg; 3616 mddev_t *mddev = NULL; 3617 3618 if (!capable(CAP_SYS_ADMIN)) 3619 return -EACCES; 3620 3621 /* 3622 * Commands dealing with the RAID driver but not any 3623 * particular array: 3624 */ 3625 switch (cmd) 3626 { 3627 case RAID_VERSION: 3628 err = 
get_version(argp); 3629 goto done; 3630 3631 case PRINT_RAID_DEBUG: 3632 err = 0; 3633 md_print_devices(); 3634 goto done; 3635 3636 #ifndef MODULE 3637 case RAID_AUTORUN: 3638 err = 0; 3639 autostart_arrays(arg); 3640 goto done; 3641 #endif 3642 default:; 3643 } 3644 3645 /* 3646 * Commands creating/starting a new array: 3647 */ 3648 3649 mddev = inode->i_bdev->bd_disk->private_data; 3650 3651 if (!mddev) { 3652 BUG(); 3653 goto abort; 3654 } 3655 3656 3657 if (cmd == START_ARRAY) { 3658 /* START_ARRAY doesn't need to lock the array as autostart_array 3659 * does the locking, and it could even be a different array 3660 */ 3661 static int cnt = 3; 3662 if (cnt > 0 ) { 3663 printk(KERN_WARNING 3664 "md: %s(pid %d) used deprecated START_ARRAY ioctl. " 3665 "This will not be supported beyond July 2006\n", 3666 current->comm, current->pid); 3667 cnt--; 3668 } 3669 err = autostart_array(new_decode_dev(arg)); 3670 if (err) { 3671 printk(KERN_WARNING "md: autostart failed!\n"); 3672 goto abort; 3673 } 3674 goto done; 3675 } 3676 3677 err = mddev_lock(mddev); 3678 if (err) { 3679 printk(KERN_INFO 3680 "md: ioctl lock interrupted, reason %d, cmd %d\n", 3681 err, cmd); 3682 goto abort; 3683 } 3684 3685 switch (cmd) 3686 { 3687 case SET_ARRAY_INFO: 3688 { 3689 mdu_array_info_t info; 3690 if (!arg) 3691 memset(&info, 0, sizeof(info)); 3692 else if (copy_from_user(&info, argp, sizeof(info))) { 3693 err = -EFAULT; 3694 goto abort_unlock; 3695 } 3696 if (mddev->pers) { 3697 err = update_array_info(mddev, &info); 3698 if (err) { 3699 printk(KERN_WARNING "md: couldn't update" 3700 " array info. %d\n", err); 3701 goto abort_unlock; 3702 } 3703 goto done_unlock; 3704 } 3705 if (!list_empty(&mddev->disks)) { 3706 printk(KERN_WARNING 3707 "md: array %s already has disks!\n", 3708 mdname(mddev)); 3709 err = -EBUSY; 3710 goto abort_unlock; 3711 } 3712 if (mddev->raid_disks) { 3713 printk(KERN_WARNING 3714 "md: array %s already initialised!\n", 3715 mdname(mddev)); 3716 err = -EBUSY; 3717 goto abort_unlock; 3718 } 3719 err = set_array_info(mddev, &info); 3720 if (err) { 3721 printk(KERN_WARNING "md: couldn't set" 3722 " array info. %d\n", err); 3723 goto abort_unlock; 3724 } 3725 } 3726 goto done_unlock; 3727 3728 default:; 3729 } 3730 3731 /* 3732 * Commands querying/configuring an existing array: 3733 */ 3734 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 3735 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */ 3736 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 3737 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) { 3738 err = -ENODEV; 3739 goto abort_unlock; 3740 } 3741 3742 /* 3743 * Commands even a read-only array can execute: 3744 */ 3745 switch (cmd) 3746 { 3747 case GET_ARRAY_INFO: 3748 err = get_array_info(mddev, argp); 3749 goto done_unlock; 3750 3751 case GET_BITMAP_FILE: 3752 err = get_bitmap_file(mddev, argp); 3753 goto done_unlock; 3754 3755 case GET_DISK_INFO: 3756 err = get_disk_info(mddev, argp); 3757 goto done_unlock; 3758 3759 case RESTART_ARRAY_RW: 3760 err = restart_array(mddev); 3761 goto done_unlock; 3762 3763 case STOP_ARRAY: 3764 err = do_md_stop (mddev, 0); 3765 goto done_unlock; 3766 3767 case STOP_ARRAY_RO: 3768 err = do_md_stop (mddev, 1); 3769 goto done_unlock; 3770 3771 /* 3772 * We have a problem here : there is no easy way to give a CHS 3773 * virtual geometry. We currently pretend that we have a 2 heads 3774 * 4 sectors (with a BIG number of cylinders...). This drives 3775 * dosfs just mad... 
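* (for scale: with heads*sectors = 8, even a 4 GiB array already reports
* 1048576 cylinders)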
;-)
3776 */
3777 }
3778
3779 /*
3780 * The remaining ioctls are changing the state of the
3781 * superblock, so we do not allow them on read-only arrays.
3782 * However non-MD ioctls (e.g. get-size) will still come through
3783 * here and hit the 'default' below, so only disallow
3784 * 'md' ioctls, and switch to rw mode if started auto-readonly.
3785 */
3786 if (_IOC_TYPE(cmd) == MD_MAJOR &&
3787 mddev->ro && mddev->pers) {
3788 if (mddev->ro == 2) {
3789 mddev->ro = 0;
3790 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3791 md_wakeup_thread(mddev->thread);
3792
3793 } else {
3794 err = -EROFS;
3795 goto abort_unlock;
3796 }
3797 }
3798
3799 switch (cmd)
3800 {
3801 case ADD_NEW_DISK:
3802 {
3803 mdu_disk_info_t info;
3804 if (copy_from_user(&info, argp, sizeof(info)))
3805 err = -EFAULT;
3806 else
3807 err = add_new_disk(mddev, &info);
3808 goto done_unlock;
3809 }
3810
3811 case HOT_REMOVE_DISK:
3812 err = hot_remove_disk(mddev, new_decode_dev(arg));
3813 goto done_unlock;
3814
3815 case HOT_ADD_DISK:
3816 err = hot_add_disk(mddev, new_decode_dev(arg));
3817 goto done_unlock;
3818
3819 case SET_DISK_FAULTY:
3820 err = set_disk_faulty(mddev, new_decode_dev(arg));
3821 goto done_unlock;
3822
3823 case RUN_ARRAY:
3824 err = do_md_run (mddev);
3825 goto done_unlock;
3826
3827 case SET_BITMAP_FILE:
3828 err = set_bitmap_file(mddev, (int)arg);
3829 goto done_unlock;
3830
3831 default:
3832 if (_IOC_TYPE(cmd) == MD_MAJOR)
3833 printk(KERN_WARNING "md: %s(pid %d) used"
3834 " obsolete MD ioctl, upgrade your"
3835 " software to use new ioctls.\n",
3836 current->comm, current->pid);
3837 err = -EINVAL;
3838 goto abort_unlock;
3839 }
3840
3841 done_unlock:
3842 abort_unlock:
3843 mddev_unlock(mddev);
3844
3845 return err;
3846 done:
3847 if (err)
3848 MD_BUG();
3849 abort:
3850 return err;
3851 }
3852
3853 static int md_open(struct inode *inode, struct file *file)
3854 {
3855 /*
3856 * Succeed if we can lock the mddev, which confirms that
3857 * it isn't being stopped right now.
3858 */
3859 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3860 int err;
3861
3862 if ((err = mddev_lock(mddev)))
3863 goto out;
3864
3865 err = 0;
3866 mddev_get(mddev);
3867 mddev_unlock(mddev);
3868
3869 check_disk_change(inode->i_bdev);
3870 out:
3871 return err;
3872 }
3873
3874 static int md_release(struct inode *inode, struct file * file)
3875 {
3876 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3877
3878 if (!mddev)
3879 BUG();
3880 mddev_put(mddev);
3881
3882 return 0;
3883 }
3884
3885 static int md_media_changed(struct gendisk *disk)
3886 {
3887 mddev_t *mddev = disk->private_data;
3888
3889 return mddev->changed;
3890 }
3891
3892 static int md_revalidate(struct gendisk *disk)
3893 {
3894 mddev_t *mddev = disk->private_data;
3895
3896 mddev->changed = 0;
3897 return 0;
3898 }
3899 static struct block_device_operations md_fops =
3900 {
3901 .owner = THIS_MODULE,
3902 .open = md_open,
3903 .release = md_release,
3904 .ioctl = md_ioctl,
3905 .getgeo = md_getgeo,
3906 .media_changed = md_media_changed,
3907 .revalidate_disk= md_revalidate,
3908 };
3909
3910 static int md_thread(void * arg)
3911 {
3912 mdk_thread_t *thread = arg;
3913
3914 /*
3915 * md_thread is a 'system-thread', its priority should be very
3916 * high. We avoid resource deadlocks individually in each
3917 * raid personality. (RAID5 does preallocation) We also use RR and
3918 * the very same RT priority as kswapd, thus we will never get
3919 * into a priority inversion deadlock.
3920 * 3921 * we definitely have to have equal or higher priority than 3922 * bdflush, otherwise bdflush will deadlock if there are too 3923 * many dirty RAID5 blocks. 3924 */ 3925 3926 allow_signal(SIGKILL); 3927 while (!kthread_should_stop()) { 3928 3929 /* We need to wait INTERRUPTIBLE so that 3930 * we don't add to the load-average. 3931 * That means we need to be sure no signals are 3932 * pending 3933 */ 3934 if (signal_pending(current)) 3935 flush_signals(current); 3936 3937 wait_event_interruptible_timeout 3938 (thread->wqueue, 3939 test_bit(THREAD_WAKEUP, &thread->flags) 3940 || kthread_should_stop(), 3941 thread->timeout); 3942 try_to_freeze(); 3943 3944 clear_bit(THREAD_WAKEUP, &thread->flags); 3945 3946 thread->run(thread->mddev); 3947 } 3948 3949 return 0; 3950 } 3951 3952 void md_wakeup_thread(mdk_thread_t *thread) 3953 { 3954 if (thread) { 3955 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 3956 set_bit(THREAD_WAKEUP, &thread->flags); 3957 wake_up(&thread->wqueue); 3958 } 3959 } 3960 3961 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 3962 const char *name) 3963 { 3964 mdk_thread_t *thread; 3965 3966 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 3967 if (!thread) 3968 return NULL; 3969 3970 init_waitqueue_head(&thread->wqueue); 3971 3972 thread->run = run; 3973 thread->mddev = mddev; 3974 thread->timeout = MAX_SCHEDULE_TIMEOUT; 3975 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 3976 if (IS_ERR(thread->tsk)) { 3977 kfree(thread); 3978 return NULL; 3979 } 3980 return thread; 3981 } 3982 3983 void md_unregister_thread(mdk_thread_t *thread) 3984 { 3985 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid); 3986 3987 kthread_stop(thread->tsk); 3988 kfree(thread); 3989 } 3990 3991 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 3992 { 3993 if (!mddev) { 3994 MD_BUG(); 3995 return; 3996 } 3997 3998 if (!rdev || test_bit(Faulty, &rdev->flags)) 3999 return; 4000 /* 4001 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 4002 mdname(mddev), 4003 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 4004 __builtin_return_address(0),__builtin_return_address(1), 4005 __builtin_return_address(2),__builtin_return_address(3)); 4006 */ 4007 if (!mddev->pers->error_handler) 4008 return; 4009 mddev->pers->error_handler(mddev,rdev); 4010 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4011 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4012 md_wakeup_thread(mddev->thread); 4013 md_new_event(mddev); 4014 } 4015 4016 /* seq_file implementation /proc/mdstat */ 4017 4018 static void status_unused(struct seq_file *seq) 4019 { 4020 int i = 0; 4021 mdk_rdev_t *rdev; 4022 struct list_head *tmp; 4023 4024 seq_printf(seq, "unused devices: "); 4025 4026 ITERATE_RDEV_PENDING(rdev,tmp) { 4027 char b[BDEVNAME_SIZE]; 4028 i++; 4029 seq_printf(seq, "%s ", 4030 bdevname(rdev->bdev,b)); 4031 } 4032 if (!i) 4033 seq_printf(seq, "<none>"); 4034 4035 seq_printf(seq, "\n"); 4036 } 4037 4038 4039 static void status_resync(struct seq_file *seq, mddev_t * mddev) 4040 { 4041 unsigned long max_blocks, resync, res, dt, db, rt; 4042 4043 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 4044 4045 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4046 max_blocks = mddev->resync_max_sectors >> 1; 4047 else 4048 max_blocks = mddev->size; 4049 4050 /* 4051 * Should not happen. 
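* (a zero max_blocks would make the progress figures below meaningless).
*
* For illustration of the arithmetic that follows: res is the progress in
* tenths of a percent, so resync=512000 blocks out of max_blocks=1024000
* gives res = (500 * 1000) / 1001 = 499, displayed as 49.9%.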
 */
	if (!max_blocks) {
		MD_BUG();
		return;
	}
	res = (resync/1024)*1000/(max_blocks/1024 + 1);
	{
		int i, x = res/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
		   (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		    "resync" : "recovery"),
		   res/10, res % 10, resync, max_blocks);

	/*
	 * We do not want to overflow, so the order of operands and
	 * the * 100 / 100 trick are important. We do a +1 to be
	 * safe against division by zero. We only estimate anyway.
	 *
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = resync - (mddev->resync_mark_cnt/2);
	rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;

	seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/dt);
}

static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp, &all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2; /* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp, mddev_t, all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;

}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

struct mdstat_info {
	int event;
};

static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t size;
	struct list_head *tmp2;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) != 0)
		return -EINTR;
	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, "(auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		size = 0;
		ITERATE_RDEV(mddev,rdev,tmp2) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			size += rdev->size;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)mddev->array_size);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)size);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq, " super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status (seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync (seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n      ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				   "%lu%s chunk",
				   bitmap->pages - bitmap->missing_pages,
				   bitmap->pages,
				   (bitmap->pages - bitmap->missing_pages)
					<< (PAGE_SHIFT - 10),
				   chunk_kb ? chunk_kb : bitmap->chunksize,
				   chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, bitmap->file->f_vfsmnt,
					 bitmap->file->f_dentry, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}

static struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}

static int md_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct mdstat_info *mi = m->private;
	m->private = NULL;
	kfree(mi);
	return seq_release(inode, file);
}

static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static struct file_operations md_seq_fops = {
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = md_seq_release,
	.poll           = mdstat_poll,
};
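
/*
 * Illustration (not part of the driver): reading /proc/mdstat through the
 * seq_file interface above produces output of roughly this shape for a
 * two-disk raid1 that is resyncing; the per-array detail line comes from the
 * personality's ->status method, so it varies by RAID level, and the numbers
 * below are purely illustrative:
 *
 *	Personalities : [raid1]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      1048512 blocks [2/2] [UU]
 *	      [===>.................]  resync = 17.2% (181632/1048512) finish=2.5min speed=5616K/sec
 *	unused devices: <none>
 *
 * A monitoring program can sleep until the array state changes by polling
 * the file for POLLPRI: mdstat_poll() adds POLLERR|POLLPRI whenever
 * md_event_count has moved past the per-open snapshot, and re-reading the
 * file from offset 0 refreshes that snapshot in md_seq_show().  A minimal
 * user-space sketch (headers and error handling omitted):
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	char buf[4096];
 *
 *	read(fd, buf, sizeof(buf));		// take the initial snapshot
 *	while (poll(&pfd, 1, -1) > 0) {
 *		lseek(fd, 0, SEEK_SET);		// re-read: fetch new state, rearm
 *		read(fd, buf, sizeof(buf));
 *	}
 */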

int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}

static int is_mddev_idle(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int idle;
	unsigned long curr_events;

	idle = 1;
	ITERATE_RDEV(mddev,rdev,tmp) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = disk_stat_read(disk, sectors[0]) +
				disk_stat_read(disk, sectors[1]) -
				atomic_read(&disk->sync_io);
		/* The difference between curr_events and last_events
		 * will be affected by any new non-sync IO (making
		 * curr_events bigger) and any difference in the amount of
		 * in-flight sync IO (making curr_events bigger or smaller).
		 * The amount in-flight is currently limited to
		 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
		 * which is at most 4096 sectors.
		 * These numbers are fairly fragile and should be made
		 * more robust, probably by enforcing the
		 * 'window size' that md_do_sync sort-of uses.
		 *
		 * Note: the following is an unsigned comparison.
		 */
		if ((curr_events - rdev->last_events + 4096) > 8192) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	return idle;
}
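
/*
 * Illustration (not part of the driver): the unsigned comparison above
 * implements a symmetric window around last_events.  With the constants
 * used here, a member device only marks the array busy when
 *
 *	delta = curr_events - rdev->last_events
 *
 * falls outside [-4096, 4096], i.e. more movement than in-flight sync IO
 * alone could account for.  Purely illustrative numbers:
 *
 *	delta =  2000  ->  2000 + 4096 =  6096 <= 8192  -> still idle
 *	delta =  9000  ->  9000 + 4096 = 13096 >  8192  -> not idle
 *	delta = -6000  ->  wraps to a huge unsigned value > 8192 -> not idle
 *
 * Whenever the test fires, last_events is re-snapshotted so the next check
 * measures movement relative to the new baseline.
 */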

void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" 512-byte blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}


/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			mddev->sb_dirty = 1;
			md_wakeup_thread(mddev->thread);
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	wait_event(mddev->sb_wait, mddev->sb_dirty==0);
}

void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
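
/*
 * Illustration (not part of the driver): md_write_start()/md_write_end()
 * are meant to bracket every write a personality forwards to member disks,
 * so the superblock can be marked active before the first write goes out
 * and safe-mode can kick in once writes drain.  A personality's request
 * path typically looks something like the sketch below (the names are
 * hypothetical; real personalities do considerably more bookkeeping):
 *
 *	static int example_make_request(request_queue_t *q, struct bio *bio)
 *	{
 *		mddev_t *mddev = q->queuedata;
 *
 *		md_write_start(mddev, bio);	// may wait for a sb update
 *		... queue the write to the member devices ...
 *		return 0;
 *	}
 *
 * and, in the completion path, once the last member write for that bio has
 * finished:
 *
 *	md_write_end(mddev);
 */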

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
static void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		ITERATE_MDDEV(mddev2,tmp) {
			if (mddev2 == mddev)
				continue;
			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying resync of %s"
					       " until %s has finished resync (they"
					       " share one or more physical units)\n",
					       mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
	} else
		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;

	printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
	       " %d KB/sec/disc.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for reconstruction.\n",
	       speed_max(mddev));

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	/* we don't use the checkpoint if there's a bitmap */
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
	    && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		j = mddev->recovery_cp;
	else
		j = 0;
	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2, (unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming recovery of %s from checkpoint.\n",
		       mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
					    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		if (last_check == 0)
			/* this is the earliest that the rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop()) {
			/*
			 * got a signal, exit.
			 */
			printk(KERN_INFO
			       "md: md_do_sync() got signal ... exiting\n");
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		/*
		 * this loop exits only if we are slower than the 'hard'
		 * speed limit, or the system was IO-idle for a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		mddev->queue->unplug_fn(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: sync done.\n", mdname(mddev));
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	mddev->queue->unplug_fn(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    mddev->curr_resync > 2 &&
	    mddev->curr_resync >= mddev->recovery_cp) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			printk(KERN_INFO
			       "md: checkpointing recovery of %s.\n",
			       mdname(mddev));
			mddev->recovery_cp = mddev->curr_resync;
		} else
			mddev->recovery_cp = MaxSector;
	}

 skip:
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}
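
/*
 * Illustration (not part of the driver): the throttling in md_do_sync()
 * derives currspeed, in KB/sec, from the distance to the oldest of the
 * SYNC_MARKS marks, which together cover roughly the last
 * SYNC_MARKS * SYNC_MARK_STEP (about 30 seconds).  Purely illustrative
 * numbers:
 *
 *	io_sectors - resync_mark_cnt = 60000 sectors  (= 30000 KB)
 *	jiffies - resync_mark        = 4 seconds worth of jiffies
 *	currspeed = 30000 / (4 + 1) + 1 = 6001 KB/sec
 *
 * If currspeed is above speed_min() the thread is allowed to back off:
 * when it also exceeds speed_max(), or non-sync IO is seen on the member
 * disks (!is_mddev_idle()), it sleeps 500 ms and re-checks before asking
 * the personality for the next chunk of sync_request() work.
 */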

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if ( ! (
		mddev->sb_dirty ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->safemode == 1) ||
		(mddev->safemode == 2 && !atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)==0) {
		int spares = 0;

		spin_lock_irq(&mddev->write_lock);
		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
		    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
			mddev->in_sync = 1;
			mddev->sb_dirty = 1;
		}
		if (mddev->safemode == 1)
			mddev->safemode = 0;
		spin_unlock_irq(&mddev->write_lock);

		if (mddev->sb_dirty)
			md_update_sb(mddev);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				ITERATE_RDEV(mddev,rdev,rtmp)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_new_event(mddev);
			goto unlock;
		}
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */
		ITERATE_RDEV(mddev,rdev,rtmp)
			if (rdev->raid_disk >= 0 &&
			    (test_bit(Faulty, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) &&
			    atomic_read(&rdev->nr_pending)==0) {
				if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
				}
			}

		if (mddev->degraded) {
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk < 0
				    && !test_bit(Faulty, &rdev->flags)) {
					if (mddev->pers->hot_add_disk(mddev,rdev)) {
						char nm[20];
						sprintf(nm, "rd%d", rdev->raid_disk);
						sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
						spares++;
						md_new_event(mddev);
					} else
						break;
				}
		}

		if (spares) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && !mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			md_new_event(mddev);
		}
	unlock:
		mddev_unlock(mddev);
	}
}
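
/*
 * Illustration (not part of the driver): personalities that have a daemon
 * thread call md_check_recovery() at the start of every pass of that
 * thread.  A hypothetical skeleton (the real raid1d/raid5d do much more):
 *
 *	static void exampled(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		... service queued and retried IO ...
 *	}
 *
 * with the function registered through md_register_thread(), which runs it
 * each time md_wakeup_thread() is called on the array's thread.  That way
 * superblock updates, safe-mode transitions and the hand-off to md_do_sync()
 * all happen in process context while holding the array lock.
 */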

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev)==0)
				do_md_stop (mddev, 1);
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
}

static int __init md_init(void)
{
	int minor;

	printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
	       " MD_SB_DISKS=%d\n",
	       MD_MAJOR_VERSION, MD_MINOR_VERSION,
	       MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
	printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
	       BITMAP_MINOR);

	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	devfs_mk_dir("md");
	blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
			    md_probe, NULL, NULL);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
			      S_IFBLK|S_IRUSR|S_IWUSR,
			      "md/%d", minor);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
			      S_IFBLK|S_IRUSR|S_IWUSR,
			      "md/mdp%d", minor);


	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table, 1);

	md_geninit();
	return (0);
}


#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
static dev_t detected_devices[128];
static int dev_cnt;

void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 127)
		detected_devices[dev_cnt++] = dev;
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];

		rdev = md_import_device(dev, 0, 0);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;

	autorun_devices(part);
}

#endif
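
/*
 * Illustration (not part of the driver): when md is built into the kernel,
 * the partition-scanning code reports partitions of type "Linux raid
 * autodetect" (0xfd) through md_autodetect_dev() above, and
 * autostart_arrays() is invoked during early boot to import those devices
 * and let autorun_devices() assemble them into arrays before the root
 * filesystem is mounted.
 */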

static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;
	int i;
	blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
	blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/%d", i);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/d%d", i);

	devfs_remove("md");

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	ITERATE_MDDEV(mddev,tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}

module_init(md_init)
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
module_param(start_dirty_degraded, int, 0644);


EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
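
/*
 * Illustration (not part of the driver): the start_ro parameter declared
 * above makes newly started arrays come up auto-read-only until the first
 * write arrives (md_write_start() then flips ->ro from 2 to 0).  It can be
 * set when the module is loaded or afterwards via sysfs, e.g. (the module
 * name depends on how md is packaged, commonly md_mod):
 *
 *	modprobe md-mod start_ro=1
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 */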