/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))

#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
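/*
 * Example (illustrative user-space sketch, not part of this file's
 * build): the limits above can be tuned from user space, globally or
 * per array.  A minimal sketch, assuming an array md0 exists:
 *
 *	FILE *f = fopen("/proc/sys/dev/raid/speed_limit_min", "w");
 *	if (f) {
 *		fprintf(f, "%d\n", 50000);	// guarantee ~50 MB/sec
 *		fclose(f);
 *	}
 *
 * The per-array equivalent is /sys/block/md0/md/sync_speed_min.
 */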
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

static void mddev_bio_destructor(struct bio *bio)
{
	mddev_t *mddev, **mddevp;

	mddevp = (void*)bio;
	mddev = mddevp[-1];

	bio_free(bio, mddev->bio_set);
}

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	__bio_clone(b, bio);
	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
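/*
 * Example (user-space sketch, not built here): waiting for the event
 * count to change by polling /proc/mdstat.  The file must be read
 * first so the current event count is recorded; poll() then reports
 * POLLPRI|POLLERR once md_new_event() has run:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	read(fd, buf, sizeof(buf));	// arm the notification
 *	poll(&pfd, 1, -1);		// blocks until the next event
 *	lseek(fd, 0, SEEK_SET);		// re-read to see the new state
 */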
/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
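/*
 * Example: a typical traversal (md_print_devices() below uses this
 * pattern).  A caller that breaks out early still owns a reference
 * to the current mddev and must drop it itself (sketch; 'wanted' is
 * illustrative):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		if (mddev->unit == wanted) {
 *			...use mddev...
 *			mddev_put(mddev);
 *			break;
 *		}
 *	}
 */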
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device
 * is being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	int rv;
	int cpu;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
EXPORT_SYMBOL_GPL(mddev_resume);

int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void submit_flushes(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, one after
			 * we regain the rcu_read_lock.
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
}

static void md_submit_flush_data(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	struct bio *bio = mddev->flush_bio;

	atomic_set(&mddev->flush_pending, 1);

	if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
	}
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		mddev->flush_bio = NULL;
		wake_up(&mddev->sb_wait);
	}
}

void md_flush_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	atomic_set(&mddev->flush_pending, 1);
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);

	submit_flushes(mddev);

	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
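/*
 * Example: a personality that has no special flush handling of its
 * own just hands REQ_FLUSH bios back to md from its ->make_request
 * method (sketch of what raid0 does):
 *
 *	static int example_make_request(mddev_t *mddev, struct bio *bio)
 *	{
 *		if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 *			md_flush_request(mddev, bio);
 *			return 0;
 *		}
 *		...map and submit the bio...
 *	}
 */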
/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue
 */
static void plugger_work(struct work_struct *work)
{
	struct plug_handle *plug =
		container_of(work, struct plug_handle, unplug_work);
	plug->unplug_fn(plug);
}
static void plugger_timeout(unsigned long data)
{
	struct plug_handle *plug = (void *)data;
	kblockd_schedule_work(NULL, &plug->unplug_work);
}
void plugger_init(struct plug_handle *plug,
		  void (*unplug_fn)(struct plug_handle *))
{
	plug->unplug_flag = 0;
	plug->unplug_fn = unplug_fn;
	init_timer(&plug->unplug_timer);
	plug->unplug_timer.function = plugger_timeout;
	plug->unplug_timer.data = (unsigned long)plug;
	INIT_WORK(&plug->unplug_work, plugger_work);
}
EXPORT_SYMBOL_GPL(plugger_init);

void plugger_set_plug(struct plug_handle *plug)
{
	if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
		mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
}
EXPORT_SYMBOL_GPL(plugger_set_plug);

int plugger_remove_plug(struct plug_handle *plug)
{
	if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
		del_timer(&plug->unplug_timer);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(plugger_remove_plug);
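/*
 * Example: a personality embeds a plug_handle in its private conf and
 * drives it like this (sketch; struct example_conf is illustrative,
 * raid5 is the in-tree user of this API):
 *
 *	static void example_unplug(struct plug_handle *plug)
 *	{
 *		struct example_conf *conf =
 *			container_of(plug, struct example_conf, plug);
 *		...release any held-back IO...
 *	}
 *
 *	plugger_init(&conf->plug, example_unplug);
 *	plugger_set_plug(&conf->plug);		// defer work for ~3ms
 *	...
 *	plugger_remove_plug(&conf->plug);	// or let the timer fire
 */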
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(mddev_t *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
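/*
 * Example: the canonical locking pattern for reconfiguration paths
 * such as sysfs store methods and ioctls (sketch):
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;		// interrupted while waiting
 *	...modify the array under reconfig_mutex...
 *	mddev_unlock(mddev);		// below; also wakes the md thread
 */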
static struct attribute_group md_redundancy_group;

static void mddev_unlock(mddev_t * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or might
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
		   bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
		 struct page *page, int rw)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;
	int ret;

	rw |= REQ_SYNC | REQ_UNPLUG;

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
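/*
 * Worked example for md_csum_fold(): folding 0xffff0001
 *	pass 1: 0x0001 + 0xffff = 0x10000
 *	pass 2: 0x0000 + 0x0001 = 0x0001
 * i.e. a 32-bit sum is reduced to 16 bits with end-around carry,
 * which is why both sides of the checksum comparison in
 * super_90_load() below are folded before testing.
 */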
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
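/*
 * Example: how the load_super() return contract above is consumed
 * during assembly (sketch; the real loop lives in analyse_sbs()
 * later in this file, where the reference device is called
 * 'freshest'):
 *
 *	mdk_rdev_t *refdev = NULL;
 *
 *	list_for_each_entry(rdev, &candidates, same_set)
 *		switch (super_types[ver].load_super(rdev, refdev, minor)) {
 *		case 1:			// newer than refdev
 *			refdev = rdev;
 *			break;
 *		case 0:			// compatible with refdev
 *			break;
 *		default:		// -EINVAL etc: kick the device
 *			...
 *		}
 */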
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512-byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
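/*
 * Worked example for the 0.90 superblock position: assuming
 * MD_NEW_SIZE_SECTORS() reserves the trailing 64K-aligned 64K chunk
 * (MD_RESERVED_SECTORS == 128), a device of 1000000 sectors gives
 *	1000000 & ~127 = 999936;   999936 - 128 = 999808
 * so the superblock starts at sector 999808: 64K below the last 64K
 * boundary of the device, and a multiple of 4K as the comment above
 * notes.
 */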
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
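/*
 * Worked example for the minor_version 0 placement above: for a
 * device of 1000003 sectors,
 *	1000003 - 16 = 999987;   999987 & ~7 = 999984
 * leaving 19 sectors (9.5K) after the superblock, i.e. within the
 * "at least 8K, but less than 12K, from end of device" window.
 */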
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we can not
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
		mdname(mddev));
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
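/*
 * Example: personalities call this at the end of their ->run()
 * method so a freshly started array gets an integrity profile when
 * possible (sketch, modelled on raid1):
 *
 *	static int example_run(mddev_t *mddev)
 *	{
 *		...allocate conf, attach component devices...
 *		return md_integrity_register(mddev);
 *	}
 */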
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
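/*
 * Example: hot-add paths call this after wiring in the new disk, so
 * that a non-matching device degrades the array's integrity profile
 * rather than corrupting it (sketch, loosely modelled on raid1's
 * ->hot_add_disk; 'p' is illustrative):
 *
 *	p->rdev = rdev;
 *	md_integrity_add_rdev(rdev, mddev);
 */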
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}
(mdk_rdev_t *)lock_rdev : rdev); 1945 if (err) { 1946 printk(KERN_ERR "md: could not bd_claim %s.\n", 1947 bdevname(bdev, b)); 1948 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1949 return err; 1950 } 1951 if (!shared) 1952 set_bit(AllReserved, &rdev->flags); 1953 rdev->bdev = bdev; 1954 return err; 1955 } 1956 1957 static void unlock_rdev(mdk_rdev_t *rdev) 1958 { 1959 struct block_device *bdev = rdev->bdev; 1960 rdev->bdev = NULL; 1961 if (!bdev) 1962 MD_BUG(); 1963 bd_release(bdev); 1964 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1965 } 1966 1967 void md_autodetect_dev(dev_t dev); 1968 1969 static void export_rdev(mdk_rdev_t * rdev) 1970 { 1971 char b[BDEVNAME_SIZE]; 1972 printk(KERN_INFO "md: export_rdev(%s)\n", 1973 bdevname(rdev->bdev,b)); 1974 if (rdev->mddev) 1975 MD_BUG(); 1976 free_disk_sb(rdev); 1977 #ifndef MODULE 1978 if (test_bit(AutoDetected, &rdev->flags)) 1979 md_autodetect_dev(rdev->bdev->bd_dev); 1980 #endif 1981 unlock_rdev(rdev); 1982 kobject_put(&rdev->kobj); 1983 } 1984 1985 static void kick_rdev_from_array(mdk_rdev_t * rdev) 1986 { 1987 unbind_rdev_from_array(rdev); 1988 export_rdev(rdev); 1989 } 1990 1991 static void export_array(mddev_t *mddev) 1992 { 1993 mdk_rdev_t *rdev, *tmp; 1994 1995 rdev_for_each(rdev, tmp, mddev) { 1996 if (!rdev->mddev) { 1997 MD_BUG(); 1998 continue; 1999 } 2000 kick_rdev_from_array(rdev); 2001 } 2002 if (!list_empty(&mddev->disks)) 2003 MD_BUG(); 2004 mddev->raid_disks = 0; 2005 mddev->major_version = 0; 2006 } 2007 2008 static void print_desc(mdp_disk_t *desc) 2009 { 2010 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 2011 desc->major,desc->minor,desc->raid_disk,desc->state); 2012 } 2013 2014 static void print_sb_90(mdp_super_t *sb) 2015 { 2016 int i; 2017 2018 printk(KERN_INFO 2019 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 2020 sb->major_version, sb->minor_version, sb->patch_version, 2021 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 2022 sb->ctime); 2023 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 2024 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 2025 sb->md_minor, sb->layout, sb->chunk_size); 2026 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 2027 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 2028 sb->utime, sb->state, sb->active_disks, sb->working_disks, 2029 sb->failed_disks, sb->spare_disks, 2030 sb->sb_csum, (unsigned long)sb->events_lo); 2031 2032 printk(KERN_INFO); 2033 for (i = 0; i < MD_SB_DISKS; i++) { 2034 mdp_disk_t *desc; 2035 2036 desc = sb->disks + i; 2037 if (desc->number || desc->major || desc->minor || 2038 desc->raid_disk || (desc->state && (desc->state != 4))) { 2039 printk(" D %2d: ", i); 2040 print_desc(desc); 2041 } 2042 } 2043 printk(KERN_INFO "md: THIS: "); 2044 print_desc(&sb->this_disk); 2045 } 2046 2047 static void print_sb_1(struct mdp_superblock_1 *sb) 2048 { 2049 __u8 *uuid; 2050 2051 uuid = sb->set_uuid; 2052 printk(KERN_INFO 2053 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n" 2054 "md: Name: \"%s\" CT:%llu\n", 2055 le32_to_cpu(sb->major_version), 2056 le32_to_cpu(sb->feature_map), 2057 uuid, 2058 sb->set_name, 2059 (unsigned long long)le64_to_cpu(sb->ctime) 2060 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 2061 2062 uuid = sb->device_uuid; 2063 printk(KERN_INFO 2064 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 2065 " RO:%llu\n" 2066 "md: Dev:%08x UUID: %pU\n" 2067 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 2068 "md: (MaxDev:%u) \n", 2069 le32_to_cpu(sb->level), 2070 (unsigned long long)le64_to_cpu(sb->size), 2071 
le32_to_cpu(sb->raid_disks), 2072 le32_to_cpu(sb->layout), 2073 le32_to_cpu(sb->chunksize), 2074 (unsigned long long)le64_to_cpu(sb->data_offset), 2075 (unsigned long long)le64_to_cpu(sb->data_size), 2076 (unsigned long long)le64_to_cpu(sb->super_offset), 2077 (unsigned long long)le64_to_cpu(sb->recovery_offset), 2078 le32_to_cpu(sb->dev_number), 2079 uuid, 2080 sb->devflags, 2081 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, 2082 (unsigned long long)le64_to_cpu(sb->events), 2083 (unsigned long long)le64_to_cpu(sb->resync_offset), 2084 le32_to_cpu(sb->sb_csum), 2085 le32_to_cpu(sb->max_dev) 2086 ); 2087 } 2088 2089 static void print_rdev(mdk_rdev_t *rdev, int major_version) 2090 { 2091 char b[BDEVNAME_SIZE]; 2092 printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", 2093 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors, 2094 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 2095 rdev->desc_nr); 2096 if (rdev->sb_loaded) { 2097 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); 2098 switch (major_version) { 2099 case 0: 2100 print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); 2101 break; 2102 case 1: 2103 print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); 2104 break; 2105 } 2106 } else 2107 printk(KERN_INFO "md: no rdev superblock!\n"); 2108 } 2109 2110 static void md_print_devices(void) 2111 { 2112 struct list_head *tmp; 2113 mdk_rdev_t *rdev; 2114 mddev_t *mddev; 2115 char b[BDEVNAME_SIZE]; 2116 2117 printk("\n"); 2118 printk("md: **********************************\n"); 2119 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); 2120 printk("md: **********************************\n"); 2121 for_each_mddev(mddev, tmp) { 2122 2123 if (mddev->bitmap) 2124 bitmap_print_sb(mddev->bitmap); 2125 else 2126 printk("%s: ", mdname(mddev)); 2127 list_for_each_entry(rdev, &mddev->disks, same_set) 2128 printk("<%s>", bdevname(rdev->bdev,b)); 2129 printk("\n"); 2130 2131 list_for_each_entry(rdev, &mddev->disks, same_set) 2132 print_rdev(rdev, mddev->major_version); 2133 } 2134 printk("md: **********************************\n"); 2135 printk("\n"); 2136 } 2137 2138 2139 static void sync_sbs(mddev_t * mddev, int nospares) 2140 { 2141 /* Update each superblock (in-memory image), but 2142 * if we are allowed to, skip spares which already 2143 * have the right event counter, or have one earlier 2144 * (which would mean they aren't being marked as dirty 2145 * with the rest of the array) 2146 */ 2147 mdk_rdev_t *rdev; 2148 list_for_each_entry(rdev, &mddev->disks, same_set) { 2149 if (rdev->sb_events == mddev->events || 2150 (nospares && 2151 rdev->raid_disk < 0 && 2152 rdev->sb_events+1 == mddev->events)) { 2153 /* Don't update this superblock */ 2154 rdev->sb_loaded = 2; 2155 } else { 2156 super_types[mddev->major_version]. 
2157 sync_super(mddev, rdev); 2158 rdev->sb_loaded = 1; 2159 } 2160 } 2161 } 2162 2163 static void md_update_sb(mddev_t * mddev, int force_change) 2164 { 2165 mdk_rdev_t *rdev; 2166 int sync_req; 2167 int nospares = 0; 2168 2169 repeat: 2170 /* First make sure individual recovery_offsets are correct */ 2171 list_for_each_entry(rdev, &mddev->disks, same_set) { 2172 if (rdev->raid_disk >= 0 && 2173 mddev->delta_disks >= 0 && 2174 !test_bit(In_sync, &rdev->flags) && 2175 mddev->curr_resync_completed > rdev->recovery_offset) 2176 rdev->recovery_offset = mddev->curr_resync_completed; 2177 2178 } 2179 if (!mddev->persistent) { 2180 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2181 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2182 if (!mddev->external) 2183 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2184 wake_up(&mddev->sb_wait); 2185 return; 2186 } 2187 2188 spin_lock_irq(&mddev->write_lock); 2189 2190 mddev->utime = get_seconds(); 2191 2192 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2193 force_change = 1; 2194 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2195 /* just a clean <-> dirty transition, possibly leave spares alone, 2196 * though if events isn't the right even/odd, we will have to do 2197 * spares after all 2198 */ 2199 nospares = 1; 2200 if (force_change) 2201 nospares = 0; 2202 if (mddev->degraded) 2203 /* If the array is degraded, then skipping spares is both 2204 * dangerous and fairly pointless. 2205 * Dangerous because a device that was removed from the array 2206 * might have an event_count that still looks up-to-date, 2207 * so it can be re-added without a resync. 2208 * Pointless because if there are any spares to skip, 2209 * then a recovery will happen and soon that array won't 2210 * be degraded any more and the spare can go back to sleep then. 2211 */ 2212 nospares = 0; 2213 2214 sync_req = mddev->in_sync; 2215 2216 /* If this is just a dirty<->clean transition, and the array is clean 2217 * and 'events' is odd, we can roll back to the previous clean state */ 2218 if (nospares 2219 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2220 && mddev->can_decrease_events 2221 && mddev->events != 1) { 2222 mddev->events--; 2223 mddev->can_decrease_events = 0; 2224 } else { 2225 /* otherwise we have to go forward and ... */ 2226 mddev->events ++; 2227 mddev->can_decrease_events = nospares; 2228 } 2229 2230 if (!mddev->events) { 2231 /* 2232 * oops, this 64-bit counter should never wrap.
2233 * Either we are around the year ~1 trillion A.D. (assuming 2234 * one event per second), or we have a bug: 2235 */ 2236 MD_BUG(); 2237 mddev->events --; 2238 } 2239 sync_sbs(mddev, nospares); 2240 spin_unlock_irq(&mddev->write_lock); 2241 2242 dprintk(KERN_INFO 2243 "md: updating %s RAID superblock on device (in sync %d)\n", 2244 mdname(mddev),mddev->in_sync); 2245 2246 bitmap_update_sb(mddev->bitmap); 2247 list_for_each_entry(rdev, &mddev->disks, same_set) { 2248 char b[BDEVNAME_SIZE]; 2249 dprintk(KERN_INFO "md: "); 2250 if (rdev->sb_loaded != 1) 2251 continue; /* no noise on spare devices */ 2252 if (test_bit(Faulty, &rdev->flags)) 2253 dprintk("(skipping faulty "); 2254 2255 dprintk("%s ", bdevname(rdev->bdev,b)); 2256 if (!test_bit(Faulty, &rdev->flags)) { 2257 md_super_write(mddev,rdev, 2258 rdev->sb_start, rdev->sb_size, 2259 rdev->sb_page); 2260 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", 2261 bdevname(rdev->bdev,b), 2262 (unsigned long long)rdev->sb_start); 2263 rdev->sb_events = mddev->events; 2264 2265 } else 2266 dprintk(")\n"); 2267 if (mddev->level == LEVEL_MULTIPATH) 2268 /* only need to write one superblock... */ 2269 break; 2270 } 2271 md_super_wait(mddev); 2272 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2273 2274 spin_lock_irq(&mddev->write_lock); 2275 if (mddev->in_sync != sync_req || 2276 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2277 /* have to write it out again */ 2278 spin_unlock_irq(&mddev->write_lock); 2279 goto repeat; 2280 } 2281 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2282 spin_unlock_irq(&mddev->write_lock); 2283 wake_up(&mddev->sb_wait); 2284 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2285 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2286 2287 } 2288 2289 /* words written to sysfs files may, or may not, be \n terminated. 2290 * We want to accept either case. For this we use cmd_match. 2291 */ 2292 static int cmd_match(const char *cmd, const char *str) 2293 { 2294 /* See if cmd, written into a sysfs file, matches 2295 * str.
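 * For example, cmd_match("idle\n", "idle") and cmd_match("idle", "idle") both return 1, while cmd_match("idle2", "idle") returns 0.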
They must either be the same, or cmd can 2296 * have a trailing newline 2297 */ 2298 while (*cmd && *str && *cmd == *str) { 2299 cmd++; 2300 str++; 2301 } 2302 if (*cmd == '\n') 2303 cmd++; 2304 if (*str || *cmd) 2305 return 0; 2306 return 1; 2307 } 2308 2309 struct rdev_sysfs_entry { 2310 struct attribute attr; 2311 ssize_t (*show)(mdk_rdev_t *, char *); 2312 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 2313 }; 2314 2315 static ssize_t 2316 state_show(mdk_rdev_t *rdev, char *page) 2317 { 2318 char *sep = ""; 2319 size_t len = 0; 2320 2321 if (test_bit(Faulty, &rdev->flags)) { 2322 len+= sprintf(page+len, "%sfaulty",sep); 2323 sep = ","; 2324 } 2325 if (test_bit(In_sync, &rdev->flags)) { 2326 len += sprintf(page+len, "%sin_sync",sep); 2327 sep = ","; 2328 } 2329 if (test_bit(WriteMostly, &rdev->flags)) { 2330 len += sprintf(page+len, "%swrite_mostly",sep); 2331 sep = ","; 2332 } 2333 if (test_bit(Blocked, &rdev->flags)) { 2334 len += sprintf(page+len, "%sblocked", sep); 2335 sep = ","; 2336 } 2337 if (!test_bit(Faulty, &rdev->flags) && 2338 !test_bit(In_sync, &rdev->flags)) { 2339 len += sprintf(page+len, "%sspare", sep); 2340 sep = ","; 2341 } 2342 return len+sprintf(page+len, "\n"); 2343 } 2344 2345 static ssize_t 2346 state_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2347 { 2348 /* can write 2349 * faulty - simulates an error 2350 * remove - disconnects the device 2351 * writemostly - sets write_mostly 2352 * -writemostly - clears write_mostly 2353 * blocked - sets the Blocked flag 2354 * -blocked - clears the Blocked flag 2355 * insync - sets In_sync provided the device isn't active 2356 */ 2357 int err = -EINVAL; 2358 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2359 md_error(rdev->mddev, rdev); 2360 err = 0; 2361 } else if (cmd_match(buf, "remove")) { 2362 if (rdev->raid_disk >= 0) 2363 err = -EBUSY; 2364 else { 2365 mddev_t *mddev = rdev->mddev; 2366 kick_rdev_from_array(rdev); 2367 if (mddev->pers) 2368 md_update_sb(mddev, 1); 2369 md_new_event(mddev); 2370 err = 0; 2371 } 2372 } else if (cmd_match(buf, "writemostly")) { 2373 set_bit(WriteMostly, &rdev->flags); 2374 err = 0; 2375 } else if (cmd_match(buf, "-writemostly")) { 2376 clear_bit(WriteMostly, &rdev->flags); 2377 err = 0; 2378 } else if (cmd_match(buf, "blocked")) { 2379 set_bit(Blocked, &rdev->flags); 2380 err = 0; 2381 } else if (cmd_match(buf, "-blocked")) { 2382 clear_bit(Blocked, &rdev->flags); 2383 wake_up(&rdev->blocked_wait); 2384 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2385 md_wakeup_thread(rdev->mddev->thread); 2386 2387 err = 0; 2388 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2389 set_bit(In_sync, &rdev->flags); 2390 err = 0; 2391 } 2392 if (!err) 2393 sysfs_notify_dirent_safe(rdev->sysfs_state); 2394 return err ?
err : len; 2395 } 2396 static struct rdev_sysfs_entry rdev_state = 2397 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2398 2399 static ssize_t 2400 errors_show(mdk_rdev_t *rdev, char *page) 2401 { 2402 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2403 } 2404 2405 static ssize_t 2406 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2407 { 2408 char *e; 2409 unsigned long n = simple_strtoul(buf, &e, 10); 2410 if (*buf && (*e == 0 || *e == '\n')) { 2411 atomic_set(&rdev->corrected_errors, n); 2412 return len; 2413 } 2414 return -EINVAL; 2415 } 2416 static struct rdev_sysfs_entry rdev_errors = 2417 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2418 2419 static ssize_t 2420 slot_show(mdk_rdev_t *rdev, char *page) 2421 { 2422 if (rdev->raid_disk < 0) 2423 return sprintf(page, "none\n"); 2424 else 2425 return sprintf(page, "%d\n", rdev->raid_disk); 2426 } 2427 2428 static ssize_t 2429 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2430 { 2431 char *e; 2432 int err; 2433 char nm[20]; 2434 int slot = simple_strtoul(buf, &e, 10); 2435 if (strncmp(buf, "none", 4)==0) 2436 slot = -1; 2437 else if (e==buf || (*e && *e!= '\n')) 2438 return -EINVAL; 2439 if (rdev->mddev->pers && slot == -1) { 2440 /* Setting 'slot' on an active array requires also 2441 * updating the 'rd%d' link, and communicating 2442 * with the personality with ->hot_*_disk. 2443 * For now we only support removing 2444 * failed/spare devices. This normally happens automatically, 2445 * but not when the metadata is externally managed. 2446 */ 2447 if (rdev->raid_disk == -1) 2448 return -EEXIST; 2449 /* personality does all needed checks */ 2450 if (rdev->mddev->pers->hot_add_disk == NULL) 2451 return -EINVAL; 2452 err = rdev->mddev->pers-> 2453 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2454 if (err) 2455 return err; 2456 sprintf(nm, "rd%d", rdev->raid_disk); 2457 sysfs_remove_link(&rdev->mddev->kobj, nm); 2458 rdev->raid_disk = -1; 2459 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2460 md_wakeup_thread(rdev->mddev->thread); 2461 } else if (rdev->mddev->pers) { 2462 mdk_rdev_t *rdev2; 2463 /* Activating a spare .. or possibly reactivating 2464 * if we ever get bitmaps working here. 2465 */ 2466 2467 if (rdev->raid_disk != -1) 2468 return -EBUSY; 2469 2470 if (rdev->mddev->pers->hot_add_disk == NULL) 2471 return -EINVAL; 2472 2473 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2474 if (rdev2->raid_disk == slot) 2475 return -EEXIST; 2476 2477 rdev->raid_disk = slot; 2478 if (test_bit(In_sync, &rdev->flags)) 2479 rdev->saved_raid_disk = slot; 2480 else 2481 rdev->saved_raid_disk = -1; 2482 err = rdev->mddev->pers-> 2483 hot_add_disk(rdev->mddev, rdev); 2484 if (err) { 2485 rdev->raid_disk = -1; 2486 return err; 2487 } else 2488 sysfs_notify_dirent_safe(rdev->sysfs_state); 2489 sprintf(nm, "rd%d", rdev->raid_disk); 2490 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) 2491 /* failure here is OK */; 2492 /* don't wakeup anyone, leave that to userspace. 
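 * (Illustrative example: on an active array md0, writing "2" to /sys/block/md0/md/dev-sdc/slot asks the personality's ->hot_add_disk to activate that device in slot 2; starting recovery is then left to userspace, as noted above.)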
*/ 2493 } else { 2494 if (slot >= rdev->mddev->raid_disks) 2495 return -ENOSPC; 2496 rdev->raid_disk = slot; 2497 /* assume it is working */ 2498 clear_bit(Faulty, &rdev->flags); 2499 clear_bit(WriteMostly, &rdev->flags); 2500 set_bit(In_sync, &rdev->flags); 2501 sysfs_notify_dirent_safe(rdev->sysfs_state); 2502 } 2503 return len; 2504 } 2505 2506 2507 static struct rdev_sysfs_entry rdev_slot = 2508 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2509 2510 static ssize_t 2511 offset_show(mdk_rdev_t *rdev, char *page) 2512 { 2513 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2514 } 2515 2516 static ssize_t 2517 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2518 { 2519 char *e; 2520 unsigned long long offset = simple_strtoull(buf, &e, 10); 2521 if (e==buf || (*e && *e != '\n')) 2522 return -EINVAL; 2523 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2524 return -EBUSY; 2525 if (rdev->sectors && rdev->mddev->external) 2526 /* Must set offset before size, so overlap checks 2527 * can be sane */ 2528 return -EBUSY; 2529 rdev->data_offset = offset; 2530 return len; 2531 } 2532 2533 static struct rdev_sysfs_entry rdev_offset = 2534 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2535 2536 static ssize_t 2537 rdev_size_show(mdk_rdev_t *rdev, char *page) 2538 { 2539 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2540 } 2541 2542 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2543 { 2544 /* check if two start/length pairs overlap */ 2545 if (s1+l1 <= s2) 2546 return 0; 2547 if (s2+l2 <= s1) 2548 return 0; 2549 return 1; 2550 } 2551 2552 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2553 { 2554 unsigned long long blocks; 2555 sector_t new; 2556 2557 if (strict_strtoull(buf, 10, &blocks) < 0) 2558 return -EINVAL; 2559 2560 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2561 return -EINVAL; /* sector conversion overflow */ 2562 2563 new = blocks * 2; 2564 if (new != blocks * 2) 2565 return -EINVAL; /* unsigned long long to sector_t overflow */ 2566 2567 *sectors = new; 2568 return 0; 2569 } 2570 2571 static ssize_t 2572 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2573 { 2574 mddev_t *my_mddev = rdev->mddev; 2575 sector_t oldsectors = rdev->sectors; 2576 sector_t sectors; 2577 2578 if (strict_blocks_to_sectors(buf, &sectors) < 0) 2579 return -EINVAL; 2580 if (my_mddev->pers && rdev->raid_disk >= 0) { 2581 if (my_mddev->persistent) { 2582 sectors = super_types[my_mddev->major_version]. 2583 rdev_size_change(rdev, sectors); 2584 if (!sectors) 2585 return -EBUSY; 2586 } else if (!sectors) 2587 sectors = (rdev->bdev->bd_inode->i_size >> 9) - 2588 rdev->data_offset; 2589 } 2590 if (sectors < my_mddev->dev_sectors) 2591 return -EINVAL; /* component must fit device */ 2592 2593 rdev->sectors = sectors; 2594 if (sectors > oldsectors && my_mddev->external) { 2595 /* need to check that all other rdevs with the same ->bdev 2596 * do not overlap. We need to unlock the mddev to avoid 2597 * a deadlock. We have already changed rdev->sectors, and if 2598 * we have to change it back, we will have the lock again.
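 * (overlaps() above treats each pair as a half-open range [start, start+len): e.g. two 100-sector regions starting at sectors 0 and 100 do not overlap, while regions starting at sectors 0 and 50 do.)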
2599 */ 2600 mddev_t *mddev; 2601 int overlap = 0; 2602 struct list_head *tmp; 2603 2604 mddev_unlock(my_mddev); 2605 for_each_mddev(mddev, tmp) { 2606 mdk_rdev_t *rdev2; 2607 2608 mddev_lock(mddev); 2609 list_for_each_entry(rdev2, &mddev->disks, same_set) 2610 if (test_bit(AllReserved, &rdev2->flags) || 2611 (rdev->bdev == rdev2->bdev && 2612 rdev != rdev2 && 2613 overlaps(rdev->data_offset, rdev->sectors, 2614 rdev2->data_offset, 2615 rdev2->sectors))) { 2616 overlap = 1; 2617 break; 2618 } 2619 mddev_unlock(mddev); 2620 if (overlap) { 2621 mddev_put(mddev); 2622 break; 2623 } 2624 } 2625 mddev_lock(my_mddev); 2626 if (overlap) { 2627 /* Someone else could have slipped in a size 2628 * change here, but doing so is just silly. 2629 * We put oldsectors back because we *know* it is 2630 * safe, and trust userspace not to race with 2631 * itself 2632 */ 2633 rdev->sectors = oldsectors; 2634 return -EBUSY; 2635 } 2636 } 2637 return len; 2638 } 2639 2640 static struct rdev_sysfs_entry rdev_size = 2641 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2642 2643 2644 static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) 2645 { 2646 unsigned long long recovery_start = rdev->recovery_offset; 2647 2648 if (test_bit(In_sync, &rdev->flags) || 2649 recovery_start == MaxSector) 2650 return sprintf(page, "none\n"); 2651 2652 return sprintf(page, "%llu\n", recovery_start); 2653 } 2654 2655 static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2656 { 2657 unsigned long long recovery_start; 2658 2659 if (cmd_match(buf, "none")) 2660 recovery_start = MaxSector; 2661 else if (strict_strtoull(buf, 10, &recovery_start)) 2662 return -EINVAL; 2663 2664 if (rdev->mddev->pers && 2665 rdev->raid_disk >= 0) 2666 return -EBUSY; 2667 2668 rdev->recovery_offset = recovery_start; 2669 if (recovery_start == MaxSector) 2670 set_bit(In_sync, &rdev->flags); 2671 else 2672 clear_bit(In_sync, &rdev->flags); 2673 return len; 2674 } 2675 2676 static struct rdev_sysfs_entry rdev_recovery_start = 2677 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2678 2679 static struct attribute *rdev_default_attrs[] = { 2680 &rdev_state.attr, 2681 &rdev_errors.attr, 2682 &rdev_slot.attr, 2683 &rdev_offset.attr, 2684 &rdev_size.attr, 2685 &rdev_recovery_start.attr, 2686 NULL, 2687 }; 2688 static ssize_t 2689 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2690 { 2691 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2692 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2693 mddev_t *mddev = rdev->mddev; 2694 ssize_t rv; 2695 2696 if (!entry->show) 2697 return -EIO; 2698 2699 rv = mddev ? mddev_lock(mddev) : -EBUSY; 2700 if (!rv) { 2701 if (rdev->mddev == NULL) 2702 rv = -EBUSY; 2703 else 2704 rv = entry->show(rdev, page); 2705 mddev_unlock(mddev); 2706 } 2707 return rv; 2708 } 2709 2710 static ssize_t 2711 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 2712 const char *page, size_t length) 2713 { 2714 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2715 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2716 ssize_t rv; 2717 mddev_t *mddev = rdev->mddev; 2718 2719 if (!entry->store) 2720 return -EIO; 2721 if (!capable(CAP_SYS_ADMIN)) 2722 return -EACCES; 2723 rv = mddev ? 
mddev_lock(mddev): -EBUSY; 2724 if (!rv) { 2725 if (rdev->mddev == NULL) 2726 rv = -EBUSY; 2727 else 2728 rv = entry->store(rdev, page, length); 2729 mddev_unlock(mddev); 2730 } 2731 return rv; 2732 } 2733 2734 static void rdev_free(struct kobject *ko) 2735 { 2736 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 2737 kfree(rdev); 2738 } 2739 static const struct sysfs_ops rdev_sysfs_ops = { 2740 .show = rdev_attr_show, 2741 .store = rdev_attr_store, 2742 }; 2743 static struct kobj_type rdev_ktype = { 2744 .release = rdev_free, 2745 .sysfs_ops = &rdev_sysfs_ops, 2746 .default_attrs = rdev_default_attrs, 2747 }; 2748 2749 void md_rdev_init(mdk_rdev_t *rdev) 2750 { 2751 rdev->desc_nr = -1; 2752 rdev->saved_raid_disk = -1; 2753 rdev->raid_disk = -1; 2754 rdev->flags = 0; 2755 rdev->data_offset = 0; 2756 rdev->sb_events = 0; 2757 rdev->last_read_error.tv_sec = 0; 2758 rdev->last_read_error.tv_nsec = 0; 2759 atomic_set(&rdev->nr_pending, 0); 2760 atomic_set(&rdev->read_errors, 0); 2761 atomic_set(&rdev->corrected_errors, 0); 2762 2763 INIT_LIST_HEAD(&rdev->same_set); 2764 init_waitqueue_head(&rdev->blocked_wait); 2765 } 2766 EXPORT_SYMBOL_GPL(md_rdev_init); 2767 /* 2768 * Import a device. If 'super_format' >= 0, then sanity check the superblock 2769 * 2770 * mark the device faulty if: 2771 * 2772 * - the device is nonexistent (zero size) 2773 * - the device has no valid superblock 2774 * 2775 * a faulty rdev _never_ has rdev->sb set. 2776 */ 2777 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 2778 { 2779 char b[BDEVNAME_SIZE]; 2780 int err; 2781 mdk_rdev_t *rdev; 2782 sector_t size; 2783 2784 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 2785 if (!rdev) { 2786 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 2787 return ERR_PTR(-ENOMEM); 2788 } 2789 2790 md_rdev_init(rdev); 2791 if ((err = alloc_disk_sb(rdev))) 2792 goto abort_free; 2793 2794 err = lock_rdev(rdev, newdev, super_format == -2); 2795 if (err) 2796 goto abort_free; 2797 2798 kobject_init(&rdev->kobj, &rdev_ktype); 2799 2800 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 2801 if (!size) { 2802 printk(KERN_WARNING 2803 "md: %s has zero or unknown size, marking faulty!\n", 2804 bdevname(rdev->bdev,b)); 2805 err = -EINVAL; 2806 goto abort_free; 2807 } 2808 2809 if (super_format >= 0) { 2810 err = super_types[super_format]. 2811 load_super(rdev, NULL, super_minor); 2812 if (err == -EINVAL) { 2813 printk(KERN_WARNING 2814 "md: %s does not have a valid v%d.%d " 2815 "superblock, not importing!\n", 2816 bdevname(rdev->bdev,b), 2817 super_format, super_minor); 2818 goto abort_free; 2819 } 2820 if (err < 0) { 2821 printk(KERN_WARNING 2822 "md: could not read %s's sb, not importing!\n", 2823 bdevname(rdev->bdev,b)); 2824 goto abort_free; 2825 } 2826 } 2827 2828 return rdev; 2829 2830 abort_free: 2831 if (rdev->sb_page) { 2832 if (rdev->bdev) 2833 unlock_rdev(rdev); 2834 free_disk_sb(rdev); 2835 } 2836 kfree(rdev); 2837 return ERR_PTR(err); 2838 } 2839 2840 /* 2841 * Check a full RAID array for plausibility 2842 */ 2843 2844 2845 static void analyze_sbs(mddev_t * mddev) 2846 { 2847 int i; 2848 mdk_rdev_t *rdev, *freshest, *tmp; 2849 char b[BDEVNAME_SIZE]; 2850 2851 freshest = NULL; 2852 rdev_for_each(rdev, tmp, mddev) 2853 switch (super_types[mddev->major_version]. 
2854 load_super(rdev, freshest, mddev->minor_version)) { 2855 case 1: 2856 freshest = rdev; 2857 break; 2858 case 0: 2859 break; 2860 default: 2861 printk( KERN_ERR \ 2862 "md: fatal superblock inconsistency in %s" 2863 " -- removing from array\n", 2864 bdevname(rdev->bdev,b)); 2865 kick_rdev_from_array(rdev); 2866 } 2867 2868 2869 super_types[mddev->major_version]. 2870 validate_super(mddev, freshest); 2871 2872 i = 0; 2873 rdev_for_each(rdev, tmp, mddev) { 2874 if (mddev->max_disks && 2875 (rdev->desc_nr >= mddev->max_disks || 2876 i > mddev->max_disks)) { 2877 printk(KERN_WARNING 2878 "md: %s: %s: only %d devices permitted\n", 2879 mdname(mddev), bdevname(rdev->bdev, b), 2880 mddev->max_disks); 2881 kick_rdev_from_array(rdev); 2882 continue; 2883 } 2884 if (rdev != freshest) 2885 if (super_types[mddev->major_version]. 2886 validate_super(mddev, rdev)) { 2887 printk(KERN_WARNING "md: kicking non-fresh %s" 2888 " from array!\n", 2889 bdevname(rdev->bdev,b)); 2890 kick_rdev_from_array(rdev); 2891 continue; 2892 } 2893 if (mddev->level == LEVEL_MULTIPATH) { 2894 rdev->desc_nr = i++; 2895 rdev->raid_disk = rdev->desc_nr; 2896 set_bit(In_sync, &rdev->flags); 2897 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { 2898 rdev->raid_disk = -1; 2899 clear_bit(In_sync, &rdev->flags); 2900 } 2901 } 2902 } 2903 2904 /* Read a fixed-point number. 2905 * Numbers in sysfs attributes should be in "standard" units where 2906 * possible, so time should be in seconds. 2907 * However we internally use a much smaller unit such as 2908 * milliseconds or jiffies. 2909 * This function takes a decimal number with a possible fractional 2910 * component, and produces an integer which is the result of 2911 * multiplying that number by 10^'scale', 2912 * all without any floating-point arithmetic (e.g. with scale == 3, "1.5" yields 1500). 2913 */ 2914 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 2915 { 2916 unsigned long result = 0; 2917 long decimals = -1; 2918 while (isdigit(*cp) || (*cp == '.'
&& decimals < 0)) { 2919 if (*cp == '.') 2920 decimals = 0; 2921 else if (decimals < scale) { 2922 unsigned int value; 2923 value = *cp - '0'; 2924 result = result * 10 + value; 2925 if (decimals >= 0) 2926 decimals++; 2927 } 2928 cp++; 2929 } 2930 if (*cp == '\n') 2931 cp++; 2932 if (*cp) 2933 return -EINVAL; 2934 if (decimals < 0) 2935 decimals = 0; 2936 while (decimals < scale) { 2937 result *= 10; 2938 decimals ++; 2939 } 2940 *res = result; 2941 return 0; 2942 } 2943 2944 2945 static void md_safemode_timeout(unsigned long data); 2946 2947 static ssize_t 2948 safe_delay_show(mddev_t *mddev, char *page) 2949 { 2950 int msec = (mddev->safemode_delay*1000)/HZ; 2951 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 2952 } 2953 static ssize_t 2954 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 2955 { 2956 unsigned long msec; 2957 2958 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 2959 return -EINVAL; 2960 if (msec == 0) 2961 mddev->safemode_delay = 0; 2962 else { 2963 unsigned long old_delay = mddev->safemode_delay; 2964 mddev->safemode_delay = (msec*HZ)/1000; 2965 if (mddev->safemode_delay == 0) 2966 mddev->safemode_delay = 1; 2967 if (mddev->safemode_delay < old_delay) 2968 md_safemode_timeout((unsigned long)mddev); 2969 } 2970 return len; 2971 } 2972 static struct md_sysfs_entry md_safe_delay = 2973 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 2974 2975 static ssize_t 2976 level_show(mddev_t *mddev, char *page) 2977 { 2978 struct mdk_personality *p = mddev->pers; 2979 if (p) 2980 return sprintf(page, "%s\n", p->name); 2981 else if (mddev->clevel[0]) 2982 return sprintf(page, "%s\n", mddev->clevel); 2983 else if (mddev->level != LEVEL_NONE) 2984 return sprintf(page, "%d\n", mddev->level); 2985 else 2986 return 0; 2987 } 2988 2989 static ssize_t 2990 level_store(mddev_t *mddev, const char *buf, size_t len) 2991 { 2992 char clevel[16]; 2993 ssize_t rv = len; 2994 struct mdk_personality *pers; 2995 long level; 2996 void *priv; 2997 mdk_rdev_t *rdev; 2998 2999 if (mddev->pers == NULL) { 3000 if (len == 0) 3001 return 0; 3002 if (len >= sizeof(mddev->clevel)) 3003 return -ENOSPC; 3004 strncpy(mddev->clevel, buf, len); 3005 if (mddev->clevel[len-1] == '\n') 3006 len--; 3007 mddev->clevel[len] = 0; 3008 mddev->level = LEVEL_NONE; 3009 return rv; 3010 } 3011 3012 /* request to change the personality. Need to ensure: 3013 * - array is not engaged in resync/recovery/reshape 3014 * - old personality can be suspended 3015 * - the new personality can take over the array.
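 * For example (illustrative, not a list of supported conversions): writing "raid5" to /sys/block/md0/md/level on a running array asks the raid5 personality's ->takeover() to adopt the array, and fails if that personality cannot accept the current layout.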
3016 */ 3017 3018 if (mddev->sync_thread || 3019 mddev->reshape_position != MaxSector || 3020 mddev->sysfs_active) 3021 return -EBUSY; 3022 3023 if (!mddev->pers->quiesce) { 3024 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3025 mdname(mddev), mddev->pers->name); 3026 return -EINVAL; 3027 } 3028 3029 /* Now find the new personality */ 3030 if (len == 0 || len >= sizeof(clevel)) 3031 return -EINVAL; 3032 strncpy(clevel, buf, len); 3033 if (clevel[len-1] == '\n') 3034 len--; 3035 clevel[len] = 0; 3036 if (strict_strtol(clevel, 10, &level)) 3037 level = LEVEL_NONE; 3038 3039 if (request_module("md-%s", clevel) != 0) 3040 request_module("md-level-%s", clevel); 3041 spin_lock(&pers_lock); 3042 pers = find_pers(level, clevel); 3043 if (!pers || !try_module_get(pers->owner)) { 3044 spin_unlock(&pers_lock); 3045 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3046 return -EINVAL; 3047 } 3048 spin_unlock(&pers_lock); 3049 3050 if (pers == mddev->pers) { 3051 /* Nothing to do! */ 3052 module_put(pers->owner); 3053 return rv; 3054 } 3055 if (!pers->takeover) { 3056 module_put(pers->owner); 3057 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3058 mdname(mddev), clevel); 3059 return -EINVAL; 3060 } 3061 3062 list_for_each_entry(rdev, &mddev->disks, same_set) 3063 rdev->new_raid_disk = rdev->raid_disk; 3064 3065 /* ->takeover must set new_* and/or delta_disks 3066 * if it succeeds, and may set them when it fails. 3067 */ 3068 priv = pers->takeover(mddev); 3069 if (IS_ERR(priv)) { 3070 mddev->new_level = mddev->level; 3071 mddev->new_layout = mddev->layout; 3072 mddev->new_chunk_sectors = mddev->chunk_sectors; 3073 mddev->raid_disks -= mddev->delta_disks; 3074 mddev->delta_disks = 0; 3075 module_put(pers->owner); 3076 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3077 mdname(mddev), clevel); 3078 return PTR_ERR(priv); 3079 } 3080 3081 /* Looks like we have a winner */ 3082 mddev_suspend(mddev); 3083 mddev->pers->stop(mddev); 3084 3085 if (mddev->pers->sync_request == NULL && 3086 pers->sync_request != NULL) { 3087 /* need to add the md_redundancy_group */ 3088 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3089 printk(KERN_WARNING 3090 "md: cannot register extra attributes for %s\n", 3091 mdname(mddev)); 3092 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action"); 3093 } 3094 if (mddev->pers->sync_request != NULL && 3095 pers->sync_request == NULL) { 3096 /* need to remove the md_redundancy_group */ 3097 if (mddev->to_remove == NULL) 3098 mddev->to_remove = &md_redundancy_group; 3099 } 3100 3101 if (mddev->pers->sync_request == NULL && 3102 mddev->external) { 3103 /* We are converting from a no-redundancy array 3104 * to a redundancy array and metadata is managed 3105 * externally so we need to be sure that writes 3106 * won't block due to a need to transition 3107 * clean->dirty 3108 * until external management is started. 
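 * (hence mddev->in_sync, safemode and safemode_delay are all cleared just below)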
3109 */ 3110 mddev->in_sync = 0; 3111 mddev->safemode_delay = 0; 3112 mddev->safemode = 0; 3113 } 3114 3115 list_for_each_entry(rdev, &mddev->disks, same_set) { 3116 char nm[20]; 3117 if (rdev->raid_disk < 0) 3118 continue; 3119 if (rdev->new_raid_disk > mddev->raid_disks) 3120 rdev->new_raid_disk = -1; 3121 if (rdev->new_raid_disk == rdev->raid_disk) 3122 continue; 3123 sprintf(nm, "rd%d", rdev->raid_disk); 3124 sysfs_remove_link(&mddev->kobj, nm); 3125 } 3126 list_for_each_entry(rdev, &mddev->disks, same_set) { 3127 if (rdev->raid_disk < 0) 3128 continue; 3129 if (rdev->new_raid_disk == rdev->raid_disk) 3130 continue; 3131 rdev->raid_disk = rdev->new_raid_disk; 3132 if (rdev->raid_disk < 0) 3133 clear_bit(In_sync, &rdev->flags); 3134 else { 3135 char nm[20]; 3136 sprintf(nm, "rd%d", rdev->raid_disk); 3137 if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 3138 printk("md: cannot register %s for %s after level change\n", 3139 nm, mdname(mddev)); 3140 } 3141 } 3142 3143 module_put(mddev->pers->owner); 3144 mddev->pers = pers; 3145 mddev->private = priv; 3146 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3147 mddev->level = mddev->new_level; 3148 mddev->layout = mddev->new_layout; 3149 mddev->chunk_sectors = mddev->new_chunk_sectors; 3150 mddev->delta_disks = 0; 3151 if (mddev->pers->sync_request == NULL) { 3152 /* this is now an array without redundancy, so 3153 * it must always be in_sync 3154 */ 3155 mddev->in_sync = 1; 3156 del_timer_sync(&mddev->safemode_timer); 3157 } 3158 pers->run(mddev); 3159 mddev_resume(mddev); 3160 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3161 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3162 md_wakeup_thread(mddev->thread); 3163 sysfs_notify(&mddev->kobj, NULL, "level"); 3164 md_new_event(mddev); 3165 return rv; 3166 } 3167 3168 static struct md_sysfs_entry md_level = 3169 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3170 3171 3172 static ssize_t 3173 layout_show(mddev_t *mddev, char *page) 3174 { 3175 /* just a number, not meaningful for all levels */ 3176 if (mddev->reshape_position != MaxSector && 3177 mddev->layout != mddev->new_layout) 3178 return sprintf(page, "%d (%d)\n", 3179 mddev->new_layout, mddev->layout); 3180 return sprintf(page, "%d\n", mddev->layout); 3181 } 3182 3183 static ssize_t 3184 layout_store(mddev_t *mddev, const char *buf, size_t len) 3185 { 3186 char *e; 3187 unsigned long n = simple_strtoul(buf, &e, 10); 3188 3189 if (!*buf || (*e && *e != '\n')) 3190 return -EINVAL; 3191 3192 if (mddev->pers) { 3193 int err; 3194 if (mddev->pers->check_reshape == NULL) 3195 return -EBUSY; 3196 mddev->new_layout = n; 3197 err = mddev->pers->check_reshape(mddev); 3198 if (err) { 3199 mddev->new_layout = mddev->layout; 3200 return err; 3201 } 3202 } else { 3203 mddev->new_layout = n; 3204 if (mddev->reshape_position == MaxSector) 3205 mddev->layout = n; 3206 } 3207 return len; 3208 } 3209 static struct md_sysfs_entry md_layout = 3210 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3211 3212 3213 static ssize_t 3214 raid_disks_show(mddev_t *mddev, char *page) 3215 { 3216 if (mddev->raid_disks == 0) 3217 return 0; 3218 if (mddev->reshape_position != MaxSector && 3219 mddev->delta_disks != 0) 3220 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3221 mddev->raid_disks - mddev->delta_disks); 3222 return sprintf(page, "%d\n", mddev->raid_disks); 3223 } 3224 3225 static int update_raid_disks(mddev_t *mddev, int raid_disks); 3226 3227 static ssize_t 3228 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 
3229 { 3230 char *e; 3231 int rv = 0; 3232 unsigned long n = simple_strtoul(buf, &e, 10); 3233 3234 if (!*buf || (*e && *e != '\n')) 3235 return -EINVAL; 3236 3237 if (mddev->pers) 3238 rv = update_raid_disks(mddev, n); 3239 else if (mddev->reshape_position != MaxSector) { 3240 int olddisks = mddev->raid_disks - mddev->delta_disks; 3241 mddev->delta_disks = n - olddisks; 3242 mddev->raid_disks = n; 3243 } else 3244 mddev->raid_disks = n; 3245 return rv ? rv : len; 3246 } 3247 static struct md_sysfs_entry md_raid_disks = 3248 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3249 3250 static ssize_t 3251 chunk_size_show(mddev_t *mddev, char *page) 3252 { 3253 if (mddev->reshape_position != MaxSector && 3254 mddev->chunk_sectors != mddev->new_chunk_sectors) 3255 return sprintf(page, "%d (%d)\n", 3256 mddev->new_chunk_sectors << 9, 3257 mddev->chunk_sectors << 9); 3258 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3259 } 3260 3261 static ssize_t 3262 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 3263 { 3264 char *e; 3265 unsigned long n = simple_strtoul(buf, &e, 10); 3266 3267 if (!*buf || (*e && *e != '\n')) 3268 return -EINVAL; 3269 3270 if (mddev->pers) { 3271 int err; 3272 if (mddev->pers->check_reshape == NULL) 3273 return -EBUSY; 3274 mddev->new_chunk_sectors = n >> 9; 3275 err = mddev->pers->check_reshape(mddev); 3276 if (err) { 3277 mddev->new_chunk_sectors = mddev->chunk_sectors; 3278 return err; 3279 } 3280 } else { 3281 mddev->new_chunk_sectors = n >> 9; 3282 if (mddev->reshape_position == MaxSector) 3283 mddev->chunk_sectors = n >> 9; 3284 } 3285 return len; 3286 } 3287 static struct md_sysfs_entry md_chunk_size = 3288 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3289 3290 static ssize_t 3291 resync_start_show(mddev_t *mddev, char *page) 3292 { 3293 if (mddev->recovery_cp == MaxSector) 3294 return sprintf(page, "none\n"); 3295 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 3296 } 3297 3298 static ssize_t 3299 resync_start_store(mddev_t *mddev, const char *buf, size_t len) 3300 { 3301 char *e; 3302 unsigned long long n = simple_strtoull(buf, &e, 10); 3303 3304 if (mddev->pers) 3305 return -EBUSY; 3306 if (cmd_match(buf, "none")) 3307 n = MaxSector; 3308 else if (!*buf || (*e && *e != '\n')) 3309 return -EINVAL; 3310 3311 mddev->recovery_cp = n; 3312 return len; 3313 } 3314 static struct md_sysfs_entry md_resync_start = 3315 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 3316 3317 /* 3318 * The array state can be: 3319 * 3320 * clear 3321 * No devices, no size, no level 3322 * Equivalent to STOP_ARRAY ioctl 3323 * inactive 3324 * May have some settings, but array is not active 3325 * all IO results in error 3326 * When written, doesn't tear down array, but just stops it 3327 * suspended (not supported yet) 3328 * All IO requests will block. The array can be reconfigured. 3329 * Writing this, if accepted, will block until array is quiescent 3330 * readonly 3331 * no resync can happen. no superblocks get written. 3332 * write requests fail 3333 * read-auto 3334 * like readonly, but behaves like 'clean' on a write request. 3335 * 3336 * clean - no pending writes, but otherwise active. 3337 * When written to inactive array, starts without resync 3338 * If a write request arrives then 3339 * if metadata is known, mark 'dirty' and switch to 'active'. 
3340 * if not known, block and switch to write-pending 3341 * If written to an active array that has pending writes, then fails. 3342 * active 3343 * fully active: IO and resync can be happening. 3344 * When written to inactive array, starts with resync 3345 * 3346 * write-pending 3347 * clean, but writes are blocked waiting for 'active' to be written. 3348 * 3349 * active-idle 3350 * like active, but no writes have been seen for a while (100msec). 3351 * 3352 */ 3353 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 3354 write_pending, active_idle, bad_word}; 3355 static char *array_states[] = { 3356 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 3357 "write-pending", "active-idle", NULL }; 3358 3359 static int match_word(const char *word, char **list) 3360 { 3361 int n; 3362 for (n=0; list[n]; n++) 3363 if (cmd_match(word, list[n])) 3364 break; 3365 return n; 3366 } 3367 3368 static ssize_t 3369 array_state_show(mddev_t *mddev, char *page) 3370 { 3371 enum array_state st = inactive; 3372 3373 if (mddev->pers) 3374 switch(mddev->ro) { 3375 case 1: 3376 st = readonly; 3377 break; 3378 case 2: 3379 st = read_auto; 3380 break; 3381 case 0: 3382 if (mddev->in_sync) 3383 st = clean; 3384 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 3385 st = write_pending; 3386 else if (mddev->safemode) 3387 st = active_idle; 3388 else 3389 st = active; 3390 } 3391 else { 3392 if (list_empty(&mddev->disks) && 3393 mddev->raid_disks == 0 && 3394 mddev->dev_sectors == 0) 3395 st = clear; 3396 else 3397 st = inactive; 3398 } 3399 return sprintf(page, "%s\n", array_states[st]); 3400 } 3401 3402 static int do_md_stop(mddev_t * mddev, int ro, int is_open); 3403 static int md_set_readonly(mddev_t * mddev, int is_open); 3404 static int do_md_run(mddev_t * mddev); 3405 static int restart_array(mddev_t *mddev); 3406 3407 static ssize_t 3408 array_state_store(mddev_t *mddev, const char *buf, size_t len) 3409 { 3410 int err = -EINVAL; 3411 enum array_state st = match_word(buf, array_states); 3412 switch(st) { 3413 case bad_word: 3414 break; 3415 case clear: 3416 /* stopping an active array */ 3417 if (atomic_read(&mddev->openers) > 0) 3418 return -EBUSY; 3419 err = do_md_stop(mddev, 0, 0); 3420 break; 3421 case inactive: 3422 /* stopping an active array */ 3423 if (mddev->pers) { 3424 if (atomic_read(&mddev->openers) > 0) 3425 return -EBUSY; 3426 err = do_md_stop(mddev, 2, 0); 3427 } else 3428 err = 0; /* already inactive */ 3429 break; 3430 case suspended: 3431 break; /* not supported yet */ 3432 case readonly: 3433 if (mddev->pers) 3434 err = md_set_readonly(mddev, 0); 3435 else { 3436 mddev->ro = 1; 3437 set_disk_ro(mddev->gendisk, 1); 3438 err = do_md_run(mddev); 3439 } 3440 break; 3441 case read_auto: 3442 if (mddev->pers) { 3443 if (mddev->ro == 0) 3444 err = md_set_readonly(mddev, 0); 3445 else if (mddev->ro == 1) 3446 err = restart_array(mddev); 3447 if (err == 0) { 3448 mddev->ro = 2; 3449 set_disk_ro(mddev->gendisk, 0); 3450 } 3451 } else { 3452 mddev->ro = 2; 3453 err = do_md_run(mddev); 3454 } 3455 break; 3456 case clean: 3457 if (mddev->pers) { 3458 restart_array(mddev); 3459 spin_lock_irq(&mddev->write_lock); 3460 if (atomic_read(&mddev->writes_pending) == 0) { 3461 if (mddev->in_sync == 0) { 3462 mddev->in_sync = 1; 3463 if (mddev->safemode == 1) 3464 mddev->safemode = 0; 3465 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3466 } 3467 err = 0; 3468 } else 3469 err = -EBUSY; 3470 spin_unlock_irq(&mddev->write_lock); 3471 } else 3472 err = 
-EINVAL; 3473 break; 3474 case active: 3475 if (mddev->pers) { 3476 restart_array(mddev); 3477 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3478 wake_up(&mddev->sb_wait); 3479 err = 0; 3480 } else { 3481 mddev->ro = 0; 3482 set_disk_ro(mddev->gendisk, 0); 3483 err = do_md_run(mddev); 3484 } 3485 break; 3486 case write_pending: 3487 case active_idle: 3488 /* these cannot be set */ 3489 break; 3490 } 3491 if (err) 3492 return err; 3493 else { 3494 sysfs_notify_dirent_safe(mddev->sysfs_state); 3495 return len; 3496 } 3497 } 3498 static struct md_sysfs_entry md_array_state = 3499 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3500 3501 static ssize_t 3502 max_corrected_read_errors_show(mddev_t *mddev, char *page) { 3503 return sprintf(page, "%d\n", 3504 atomic_read(&mddev->max_corr_read_errors)); 3505 } 3506 3507 static ssize_t 3508 max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len) 3509 { 3510 char *e; 3511 unsigned long n = simple_strtoul(buf, &e, 10); 3512 3513 if (*buf && (*e == 0 || *e == '\n')) { 3514 atomic_set(&mddev->max_corr_read_errors, n); 3515 return len; 3516 } 3517 return -EINVAL; 3518 } 3519 3520 static struct md_sysfs_entry max_corr_read_errors = 3521 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 3522 max_corrected_read_errors_store); 3523 3524 static ssize_t 3525 null_show(mddev_t *mddev, char *page) 3526 { 3527 return -EINVAL; 3528 } 3529 3530 static ssize_t 3531 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 3532 { 3533 /* buf must be of the form %d:%d, optionally followed by '\n', giving major and minor numbers (e.g. "8:16") */ 3534 /* The new device is added to the array. 3535 * If the array has a persistent superblock, we read the 3536 * superblock to initialise info and check validity. 3537 * Otherwise, the only checking done is that in bind_rdev_to_array, 3538 * which mainly checks size. 3539 */ 3540 char *e; 3541 int major = simple_strtoul(buf, &e, 10); 3542 int minor; 3543 dev_t dev; 3544 mdk_rdev_t *rdev; 3545 int err; 3546 3547 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3548 return -EINVAL; 3549 minor = simple_strtoul(e+1, &e, 10); 3550 if (*e && *e != '\n') 3551 return -EINVAL; 3552 dev = MKDEV(major, minor); 3553 if (major != MAJOR(dev) || 3554 minor != MINOR(dev)) 3555 return -EOVERFLOW; 3556 3557 3558 if (mddev->persistent) { 3559 rdev = md_import_device(dev, mddev->major_version, 3560 mddev->minor_version); 3561 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3562 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3563 mdk_rdev_t, same_set); 3564 err = super_types[mddev->major_version] 3565 .load_super(rdev, rdev0, mddev->minor_version); 3566 if (err < 0) 3567 goto out; 3568 } 3569 } else if (mddev->external) 3570 rdev = md_import_device(dev, -2, -1); 3571 else 3572 rdev = md_import_device(dev, -1, -1); 3573 3574 if (IS_ERR(rdev)) 3575 return PTR_ERR(rdev); 3576 err = bind_rdev_to_array(rdev, mddev); 3577 out: 3578 if (err) 3579 export_rdev(rdev); 3580 return err ? err : len; 3581 } 3582 3583 static struct md_sysfs_entry md_new_device = 3584 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 3585 3586 static ssize_t 3587 bitmap_store(mddev_t *mddev, const char *buf, size_t len) 3588 { 3589 char *end; 3590 unsigned long chunk, end_chunk; 3591 3592 if (!mddev->bitmap) 3593 goto out; 3594 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ...
(range). For example, "10-20 25" dirties chunks 10 through 20 and chunk 25. */ 3595 while (*buf) { 3596 chunk = end_chunk = simple_strtoul(buf, &end, 0); 3597 if (buf == end) break; 3598 if (*end == '-') { /* range */ 3599 buf = end + 1; 3600 end_chunk = simple_strtoul(buf, &end, 0); 3601 if (buf == end) break; 3602 } 3603 if (*end && !isspace(*end)) break; 3604 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3605 buf = skip_spaces(end); 3606 } 3607 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3608 out: 3609 return len; 3610 } 3611 3612 static struct md_sysfs_entry md_bitmap = 3613 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3614 3615 static ssize_t 3616 size_show(mddev_t *mddev, char *page) 3617 { 3618 return sprintf(page, "%llu\n", 3619 (unsigned long long)mddev->dev_sectors / 2); 3620 } 3621 3622 static int update_size(mddev_t *mddev, sector_t num_sectors); 3623 3624 static ssize_t 3625 size_store(mddev_t *mddev, const char *buf, size_t len) 3626 { 3627 /* If array is inactive, we can reduce the component size, but 3628 * not increase it (except from 0). 3629 * If array is active, we can try an on-line resize 3630 */ 3631 sector_t sectors; 3632 int err = strict_blocks_to_sectors(buf, &sectors); 3633 3634 if (err < 0) 3635 return err; 3636 if (mddev->pers) { 3637 err = update_size(mddev, sectors); 3638 md_update_sb(mddev, 1); 3639 } else { 3640 if (mddev->dev_sectors == 0 || 3641 mddev->dev_sectors > sectors) 3642 mddev->dev_sectors = sectors; 3643 else 3644 err = -ENOSPC; 3645 } 3646 return err ? err : len; 3647 } 3648 3649 static struct md_sysfs_entry md_size = 3650 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3651 3652 3653 /* Metadata version. 3654 * This is one of 3655 * 'none' for arrays with no metadata (good luck...) 3656 * 'external' for arrays with externally managed metadata, 3657 * or N.M for internally known formats 3658 */ 3659 static ssize_t 3660 metadata_show(mddev_t *mddev, char *page) 3661 { 3662 if (mddev->persistent) 3663 return sprintf(page, "%d.%d\n", 3664 mddev->major_version, mddev->minor_version); 3665 else if (mddev->external) 3666 return sprintf(page, "external:%s\n", mddev->metadata_type); 3667 else 3668 return sprintf(page, "none\n"); 3669 } 3670 3671 static ssize_t 3672 metadata_store(mddev_t *mddev, const char *buf, size_t len) 3673 { 3674 int major, minor; 3675 char *e; 3676 /* Changing the details of 'external' metadata is 3677 * always permitted. Otherwise there must be 3678 * no devices attached to the array.
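 * For example, writing "1.2" selects internal v1.2 metadata, while something like "external:imsm" (an illustrative externally-managed type) just records the type string and leaves metadata handling to userspace.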
3679 */ 3680 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3681 ; 3682 else if (!list_empty(&mddev->disks)) 3683 return -EBUSY; 3684 3685 if (cmd_match(buf, "none")) { 3686 mddev->persistent = 0; 3687 mddev->external = 0; 3688 mddev->major_version = 0; 3689 mddev->minor_version = 90; 3690 return len; 3691 } 3692 if (strncmp(buf, "external:", 9) == 0) { 3693 size_t namelen = len-9; 3694 if (namelen >= sizeof(mddev->metadata_type)) 3695 namelen = sizeof(mddev->metadata_type)-1; 3696 strncpy(mddev->metadata_type, buf+9, namelen); 3697 mddev->metadata_type[namelen] = 0; 3698 if (namelen && mddev->metadata_type[namelen-1] == '\n') 3699 mddev->metadata_type[--namelen] = 0; 3700 mddev->persistent = 0; 3701 mddev->external = 1; 3702 mddev->major_version = 0; 3703 mddev->minor_version = 90; 3704 return len; 3705 } 3706 major = simple_strtoul(buf, &e, 10); 3707 if (e==buf || *e != '.') 3708 return -EINVAL; 3709 buf = e+1; 3710 minor = simple_strtoul(buf, &e, 10); 3711 if (e==buf || (*e && *e != '\n') ) 3712 return -EINVAL; 3713 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 3714 return -ENOENT; 3715 mddev->major_version = major; 3716 mddev->minor_version = minor; 3717 mddev->persistent = 1; 3718 mddev->external = 0; 3719 return len; 3720 } 3721 3722 static struct md_sysfs_entry md_metadata = 3723 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 3724 3725 static ssize_t 3726 action_show(mddev_t *mddev, char *page) 3727 { 3728 char *type = "idle"; 3729 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3730 type = "frozen"; 3731 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3732 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3733 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3734 type = "reshape"; 3735 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3736 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 3737 type = "resync"; 3738 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 3739 type = "check"; 3740 else 3741 type = "repair"; 3742 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 3743 type = "recover"; 3744 } 3745 return sprintf(page, "%s\n", type); 3746 } 3747 3748 static ssize_t 3749 action_store(mddev_t *mddev, const char *page, size_t len) 3750 { 3751 if (!mddev->pers || !mddev->pers->sync_request) 3752 return -EINVAL; 3753 3754 if (cmd_match(page, "frozen")) 3755 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3756 else 3757 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3758 3759 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 3760 if (mddev->sync_thread) { 3761 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3762 md_unregister_thread(mddev->sync_thread); 3763 mddev->sync_thread = NULL; 3764 mddev->recovery = 0; 3765 } 3766 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3767 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 3768 return -EBUSY; 3769 else if (cmd_match(page, "resync")) 3770 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3771 else if (cmd_match(page, "recover")) { 3772 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3773 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3774 } else if (cmd_match(page, "reshape")) { 3775 int err; 3776 if (mddev->pers->start_reshape == NULL) 3777 return -EINVAL; 3778 err = mddev->pers->start_reshape(mddev); 3779 if (err) 3780 return err; 3781 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3782 } else { 3783 if (cmd_match(page, "check")) 3784 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3785 
else if (!cmd_match(page, "repair")) 3786 return -EINVAL; 3787 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 3788 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3789 } 3790 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3791 md_wakeup_thread(mddev->thread); 3792 sysfs_notify_dirent_safe(mddev->sysfs_action); 3793 return len; 3794 } 3795 3796 static ssize_t 3797 mismatch_cnt_show(mddev_t *mddev, char *page) 3798 { 3799 return sprintf(page, "%llu\n", 3800 (unsigned long long) mddev->resync_mismatches); 3801 } 3802 3803 static struct md_sysfs_entry md_scan_mode = 3804 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 3805 3806 3807 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 3808 3809 static ssize_t 3810 sync_min_show(mddev_t *mddev, char *page) 3811 { 3812 return sprintf(page, "%d (%s)\n", speed_min(mddev), 3813 mddev->sync_speed_min ? "local": "system"); 3814 } 3815 3816 static ssize_t 3817 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 3818 { 3819 int min; 3820 char *e; 3821 if (strncmp(buf, "system", 6)==0) { 3822 mddev->sync_speed_min = 0; 3823 return len; 3824 } 3825 min = simple_strtoul(buf, &e, 10); 3826 if (buf == e || (*e && *e != '\n') || min <= 0) 3827 return -EINVAL; 3828 mddev->sync_speed_min = min; 3829 return len; 3830 } 3831 3832 static struct md_sysfs_entry md_sync_min = 3833 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 3834 3835 static ssize_t 3836 sync_max_show(mddev_t *mddev, char *page) 3837 { 3838 return sprintf(page, "%d (%s)\n", speed_max(mddev), 3839 mddev->sync_speed_max ? "local": "system"); 3840 } 3841 3842 static ssize_t 3843 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 3844 { 3845 int max; 3846 char *e; 3847 if (strncmp(buf, "system", 6)==0) { 3848 mddev->sync_speed_max = 0; 3849 return len; 3850 } 3851 max = simple_strtoul(buf, &e, 10); 3852 if (buf == e || (*e && *e != '\n') || max <= 0) 3853 return -EINVAL; 3854 mddev->sync_speed_max = max; 3855 return len; 3856 } 3857 3858 static struct md_sysfs_entry md_sync_max = 3859 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 3860 3861 static ssize_t 3862 degraded_show(mddev_t *mddev, char *page) 3863 { 3864 return sprintf(page, "%d\n", mddev->degraded); 3865 } 3866 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 3867 3868 static ssize_t 3869 sync_force_parallel_show(mddev_t *mddev, char *page) 3870 { 3871 return sprintf(page, "%d\n", mddev->parallel_resync); 3872 } 3873 3874 static ssize_t 3875 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 3876 { 3877 long n; 3878 3879 if (strict_strtol(buf, 10, &n)) 3880 return -EINVAL; 3881 3882 if (n != 0 && n != 1) 3883 return -EINVAL; 3884 3885 mddev->parallel_resync = n; 3886 3887 if (mddev->sync_thread) 3888 wake_up(&resync_wait); 3889 3890 return len; 3891 } 3892 3893 /* force parallel resync, even with shared block devices */ 3894 static struct md_sysfs_entry md_sync_force_parallel = 3895 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 3896 sync_force_parallel_show, sync_force_parallel_store); 3897 3898 static ssize_t 3899 sync_speed_show(mddev_t *mddev, char *page) 3900 { 3901 unsigned long resync, dt, db; 3902 if (mddev->curr_resync == 0) 3903 return sprintf(page, "none\n"); 3904 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 3905 dt = (jiffies - mddev->resync_mark) / HZ; 3906 if (!dt) dt++; 3907 db = resync - mddev->resync_mark_cnt; 3908 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 
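/* (dt is in seconds and db in 512-byte sectors, so db/dt/2 gives KiB/sec, matching the "K/sec" note above) */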
3909 } 3910 3911 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 3912 3913 static ssize_t 3914 sync_completed_show(mddev_t *mddev, char *page) 3915 { 3916 unsigned long max_sectors, resync; 3917 3918 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3919 return sprintf(page, "none\n"); 3920 3921 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 3922 max_sectors = mddev->resync_max_sectors; 3923 else 3924 max_sectors = mddev->dev_sectors; 3925 3926 resync = mddev->curr_resync_completed; 3927 return sprintf(page, "%lu / %lu\n", resync, max_sectors); 3928 } 3929 3930 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 3931 3932 static ssize_t 3933 min_sync_show(mddev_t *mddev, char *page) 3934 { 3935 return sprintf(page, "%llu\n", 3936 (unsigned long long)mddev->resync_min); 3937 } 3938 static ssize_t 3939 min_sync_store(mddev_t *mddev, const char *buf, size_t len) 3940 { 3941 unsigned long long min; 3942 if (strict_strtoull(buf, 10, &min)) 3943 return -EINVAL; 3944 if (min > mddev->resync_max) 3945 return -EINVAL; 3946 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3947 return -EBUSY; 3948 3949 /* Must be a multiple of chunk_size */ 3950 if (mddev->chunk_sectors) { 3951 sector_t temp = min; 3952 if (sector_div(temp, mddev->chunk_sectors)) 3953 return -EINVAL; 3954 } 3955 mddev->resync_min = min; 3956 3957 return len; 3958 } 3959 3960 static struct md_sysfs_entry md_min_sync = 3961 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 3962 3963 static ssize_t 3964 max_sync_show(mddev_t *mddev, char *page) 3965 { 3966 if (mddev->resync_max == MaxSector) 3967 return sprintf(page, "max\n"); 3968 else 3969 return sprintf(page, "%llu\n", 3970 (unsigned long long)mddev->resync_max); 3971 } 3972 static ssize_t 3973 max_sync_store(mddev_t *mddev, const char *buf, size_t len) 3974 { 3975 if (strncmp(buf, "max", 3) == 0) 3976 mddev->resync_max = MaxSector; 3977 else { 3978 unsigned long long max; 3979 if (strict_strtoull(buf, 10, &max)) 3980 return -EINVAL; 3981 if (max < mddev->resync_min) 3982 return -EINVAL; 3983 if (max < mddev->resync_max && 3984 mddev->ro == 0 && 3985 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3986 return -EBUSY; 3987 3988 /* Must be a multiple of chunk_size */ 3989 if (mddev->chunk_sectors) { 3990 sector_t temp = max; 3991 if (sector_div(temp, mddev->chunk_sectors)) 3992 return -EINVAL; 3993 } 3994 mddev->resync_max = max; 3995 } 3996 wake_up(&mddev->recovery_wait); 3997 return len; 3998 } 3999 4000 static struct md_sysfs_entry md_max_sync = 4001 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4002 4003 static ssize_t 4004 suspend_lo_show(mddev_t *mddev, char *page) 4005 { 4006 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4007 } 4008 4009 static ssize_t 4010 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 4011 { 4012 char *e; 4013 unsigned long long new = simple_strtoull(buf, &e, 10); 4014 4015 if (mddev->pers == NULL || 4016 mddev->pers->quiesce == NULL) 4017 return -EINVAL; 4018 if (buf == e || (*e && *e != '\n')) 4019 return -EINVAL; 4020 if (new >= mddev->suspend_hi || 4021 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 4022 mddev->suspend_lo = new; 4023 mddev->pers->quiesce(mddev, 2); 4024 return len; 4025 } else 4026 return -EINVAL; 4027 } 4028 static struct md_sysfs_entry md_suspend_lo = 4029 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4030 4031 4032 static ssize_t 4033 suspend_hi_show(mddev_t *mddev, char 
*page) 4034 { 4035 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4036 } 4037 4038 static ssize_t 4039 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 4040 { 4041 char *e; 4042 unsigned long long new = simple_strtoull(buf, &e, 10); 4043 4044 if (mddev->pers == NULL || 4045 mddev->pers->quiesce == NULL) 4046 return -EINVAL; 4047 if (buf == e || (*e && *e != '\n')) 4048 return -EINVAL; 4049 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 4050 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 4051 mddev->suspend_hi = new; 4052 mddev->pers->quiesce(mddev, 1); 4053 mddev->pers->quiesce(mddev, 0); 4054 return len; 4055 } else 4056 return -EINVAL; 4057 } 4058 static struct md_sysfs_entry md_suspend_hi = 4059 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4060 4061 static ssize_t 4062 reshape_position_show(mddev_t *mddev, char *page) 4063 { 4064 if (mddev->reshape_position != MaxSector) 4065 return sprintf(page, "%llu\n", 4066 (unsigned long long)mddev->reshape_position); 4067 strcpy(page, "none\n"); 4068 return 5; 4069 } 4070 4071 static ssize_t 4072 reshape_position_store(mddev_t *mddev, const char *buf, size_t len) 4073 { 4074 char *e; 4075 unsigned long long new = simple_strtoull(buf, &e, 10); 4076 if (mddev->pers) 4077 return -EBUSY; 4078 if (buf == e || (*e && *e != '\n')) 4079 return -EINVAL; 4080 mddev->reshape_position = new; 4081 mddev->delta_disks = 0; 4082 mddev->new_level = mddev->level; 4083 mddev->new_layout = mddev->layout; 4084 mddev->new_chunk_sectors = mddev->chunk_sectors; 4085 return len; 4086 } 4087 4088 static struct md_sysfs_entry md_reshape_position = 4089 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4090 reshape_position_store); 4091 4092 static ssize_t 4093 array_size_show(mddev_t *mddev, char *page) 4094 { 4095 if (mddev->external_size) 4096 return sprintf(page, "%llu\n", 4097 (unsigned long long)mddev->array_sectors/2); 4098 else 4099 return sprintf(page, "default\n"); 4100 } 4101 4102 static ssize_t 4103 array_size_store(mddev_t *mddev, const char *buf, size_t len) 4104 { 4105 sector_t sectors; 4106 4107 if (strncmp(buf, "default", 7) == 0) { 4108 if (mddev->pers) 4109 sectors = mddev->pers->size(mddev, 0, 0); 4110 else 4111 sectors = mddev->array_sectors; 4112 4113 mddev->external_size = 0; 4114 } else { 4115 if (strict_blocks_to_sectors(buf, &sectors) < 0) 4116 return -EINVAL; 4117 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4118 return -E2BIG; 4119 4120 mddev->external_size = 1; 4121 } 4122 4123 mddev->array_sectors = sectors; 4124 set_capacity(mddev->gendisk, mddev->array_sectors); 4125 if (mddev->pers) 4126 revalidate_disk(mddev->gendisk); 4127 4128 return len; 4129 } 4130 4131 static struct md_sysfs_entry md_array_size = 4132 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4133 array_size_store); 4134 4135 static struct attribute *md_default_attrs[] = { 4136 &md_level.attr, 4137 &md_layout.attr, 4138 &md_raid_disks.attr, 4139 &md_chunk_size.attr, 4140 &md_size.attr, 4141 &md_resync_start.attr, 4142 &md_metadata.attr, 4143 &md_new_device.attr, 4144 &md_safe_delay.attr, 4145 &md_array_state.attr, 4146 &md_reshape_position.attr, 4147 &md_array_size.attr, 4148 &max_corr_read_errors.attr, 4149 NULL, 4150 }; 4151 4152 static struct attribute *md_redundancy_attrs[] = { 4153 &md_scan_mode.attr, 4154 &md_mismatches.attr, 4155 &md_sync_min.attr, 4156 &md_sync_max.attr, 4157 &md_sync_speed.attr, 4158 &md_sync_force_parallel.attr, 4159
&md_sync_completed.attr, 4160 &md_min_sync.attr, 4161 &md_max_sync.attr, 4162 &md_suspend_lo.attr, 4163 &md_suspend_hi.attr, 4164 &md_bitmap.attr, 4165 &md_degraded.attr, 4166 NULL, 4167 }; 4168 static struct attribute_group md_redundancy_group = { 4169 .name = NULL, 4170 .attrs = md_redundancy_attrs, 4171 }; 4172 4173 4174 static ssize_t 4175 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4176 { 4177 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4178 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4179 ssize_t rv; 4180 4181 if (!entry->show) 4182 return -EIO; 4183 rv = mddev_lock(mddev); 4184 if (!rv) { 4185 rv = entry->show(mddev, page); 4186 mddev_unlock(mddev); 4187 } 4188 return rv; 4189 } 4190 4191 static ssize_t 4192 md_attr_store(struct kobject *kobj, struct attribute *attr, 4193 const char *page, size_t length) 4194 { 4195 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4196 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4197 ssize_t rv; 4198 4199 if (!entry->store) 4200 return -EIO; 4201 if (!capable(CAP_SYS_ADMIN)) 4202 return -EACCES; 4203 rv = mddev_lock(mddev); 4204 if (mddev->hold_active == UNTIL_IOCTL) 4205 mddev->hold_active = 0; 4206 if (!rv) { 4207 rv = entry->store(mddev, page, length); 4208 mddev_unlock(mddev); 4209 } 4210 return rv; 4211 } 4212 4213 static void md_free(struct kobject *ko) 4214 { 4215 mddev_t *mddev = container_of(ko, mddev_t, kobj); 4216 4217 if (mddev->sysfs_state) 4218 sysfs_put(mddev->sysfs_state); 4219 4220 if (mddev->gendisk) { 4221 del_gendisk(mddev->gendisk); 4222 put_disk(mddev->gendisk); 4223 } 4224 if (mddev->queue) 4225 blk_cleanup_queue(mddev->queue); 4226 4227 kfree(mddev); 4228 } 4229 4230 static const struct sysfs_ops md_sysfs_ops = { 4231 .show = md_attr_show, 4232 .store = md_attr_store, 4233 }; 4234 static struct kobj_type md_ktype = { 4235 .release = md_free, 4236 .sysfs_ops = &md_sysfs_ops, 4237 .default_attrs = md_default_attrs, 4238 }; 4239 4240 int mdp_major = 0; 4241 4242 static void mddev_delayed_delete(struct work_struct *ws) 4243 { 4244 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4245 4246 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4247 kobject_del(&mddev->kobj); 4248 kobject_put(&mddev->kobj); 4249 } 4250 4251 static int md_alloc(dev_t dev, char *name) 4252 { 4253 static DEFINE_MUTEX(disks_mutex); 4254 mddev_t *mddev = mddev_find(dev); 4255 struct gendisk *disk; 4256 int partitioned; 4257 int shift; 4258 int unit; 4259 int error; 4260 4261 if (!mddev) 4262 return -ENODEV; 4263 4264 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4265 shift = partitioned ? MdpMinorShift : 0; 4266 unit = MINOR(mddev->unit) >> shift; 4267 4268 /* wait for any previous instance of this device to be 4269 * completely removed (mddev_delayed_delete). 4270 */ 4271 flush_workqueue(md_misc_wq); 4272 4273 mutex_lock(&disks_mutex); 4274 error = -EEXIST; 4275 if (mddev->gendisk) 4276 goto abort; 4277 4278 if (name) { 4279 /* Need to ensure that 'name' is not a duplicate. 
4280 */ 4281 mddev_t *mddev2; 4282 spin_lock(&all_mddevs_lock); 4283 4284 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4285 if (mddev2->gendisk && 4286 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4287 spin_unlock(&all_mddevs_lock); 4288 goto abort; 4289 } 4290 spin_unlock(&all_mddevs_lock); 4291 } 4292 4293 error = -ENOMEM; 4294 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4295 if (!mddev->queue) 4296 goto abort; 4297 mddev->queue->queuedata = mddev; 4298 4299 /* Can be unlocked because the queue is new: no concurrency */ 4300 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); 4301 4302 blk_queue_make_request(mddev->queue, md_make_request); 4303 4304 disk = alloc_disk(1 << shift); 4305 if (!disk) { 4306 blk_cleanup_queue(mddev->queue); 4307 mddev->queue = NULL; 4308 goto abort; 4309 } 4310 disk->major = MAJOR(mddev->unit); 4311 disk->first_minor = unit << shift; 4312 if (name) 4313 strcpy(disk->disk_name, name); 4314 else if (partitioned) 4315 sprintf(disk->disk_name, "md_d%d", unit); 4316 else 4317 sprintf(disk->disk_name, "md%d", unit); 4318 disk->fops = &md_fops; 4319 disk->private_data = mddev; 4320 disk->queue = mddev->queue; 4321 /* Allow extended partitions. This makes the 4322 * 'mdp' device redundant, but we can't really 4323 * remove it now. 4324 */ 4325 disk->flags |= GENHD_FL_EXT_DEVT; 4326 add_disk(disk); 4327 mddev->gendisk = disk; 4328 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4329 &disk_to_dev(disk)->kobj, "%s", "md"); 4330 if (error) { 4331 /* This isn't possible, but as kobject_init_and_add is marked 4332 * __must_check, we must do something with the result 4333 */ 4334 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4335 disk->disk_name); 4336 error = 0; 4337 } 4338 if (mddev->kobj.sd && 4339 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4340 printk(KERN_DEBUG "pointless warning\n"); 4341 abort: 4342 mutex_unlock(&disks_mutex); 4343 if (!error && mddev->kobj.sd) { 4344 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4345 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4346 } 4347 mddev_put(mddev); 4348 return error; 4349 } 4350 4351 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4352 { 4353 md_alloc(dev, NULL); 4354 return NULL; 4355 } 4356 4357 static int add_named_array(const char *val, struct kernel_param *kp) 4358 { 4359 /* val must be "md_*" where * is not all digits. 4360 * We allocate an array with a large free minor number, and 4361 * set the name to val. val must not already be an active name. 4362 */ 4363 int len = strlen(val); 4364 char buf[DISK_NAME_LEN]; 4365 4366 while (len && val[len-1] == '\n') 4367 len--; 4368 if (len >= DISK_NAME_LEN) 4369 return -E2BIG; 4370 strlcpy(buf, val, len+1); 4371 if (strncmp(buf, "md_", 3) != 0) 4372 return -EINVAL; 4373 return md_alloc(0, buf); 4374 } 4375 4376 static void md_safemode_timeout(unsigned long data) 4377 { 4378 mddev_t *mddev = (mddev_t *) data; 4379 4380 if (!atomic_read(&mddev->writes_pending)) { 4381 mddev->safemode = 1; 4382 if (mddev->external) 4383 sysfs_notify_dirent_safe(mddev->sysfs_state); 4384 } 4385 md_wakeup_thread(mddev->thread); 4386 } 4387 4388 static int start_dirty_degraded; 4389 4390 int md_run(mddev_t *mddev) 4391 { 4392 int err; 4393 mdk_rdev_t *rdev; 4394 struct mdk_personality *pers; 4395 4396 if (list_empty(&mddev->disks)) 4397 /* cannot run an array with no devices.. 
*/ 4398 return -EINVAL; 4399 4400 if (mddev->pers) 4401 return -EBUSY; 4402 /* Cannot run until previous stop completes properly */ 4403 if (mddev->sysfs_active) 4404 return -EBUSY; 4405 4406 /* 4407 * Analyze all RAID superblock(s) 4408 */ 4409 if (!mddev->raid_disks) { 4410 if (!mddev->persistent) 4411 return -EINVAL; 4412 analyze_sbs(mddev); 4413 } 4414 4415 if (mddev->level != LEVEL_NONE) 4416 request_module("md-level-%d", mddev->level); 4417 else if (mddev->clevel[0]) 4418 request_module("md-%s", mddev->clevel); 4419 4420 /* 4421 * Drop all container device buffers, from now on 4422 * the only valid external interface is through the md 4423 * device. 4424 */ 4425 list_for_each_entry(rdev, &mddev->disks, same_set) { 4426 if (test_bit(Faulty, &rdev->flags)) 4427 continue; 4428 sync_blockdev(rdev->bdev); 4429 invalidate_bdev(rdev->bdev); 4430 4431 /* perform some consistency tests on the device. 4432 * We don't want the data to overlap the metadata, 4433 * Internal Bitmap issues have been handled elsewhere. 4434 */ 4435 if (rdev->data_offset < rdev->sb_start) { 4436 if (mddev->dev_sectors && 4437 rdev->data_offset + mddev->dev_sectors 4438 > rdev->sb_start) { 4439 printk("md: %s: data overlaps metadata\n", 4440 mdname(mddev)); 4441 return -EINVAL; 4442 } 4443 } else { 4444 if (rdev->sb_start + rdev->sb_size/512 4445 > rdev->data_offset) { 4446 printk("md: %s: metadata overlaps data\n", 4447 mdname(mddev)); 4448 return -EINVAL; 4449 } 4450 } 4451 sysfs_notify_dirent_safe(rdev->sysfs_state); 4452 } 4453 4454 if (mddev->bio_set == NULL) 4455 mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev)); 4456 4457 spin_lock(&pers_lock); 4458 pers = find_pers(mddev->level, mddev->clevel); 4459 if (!pers || !try_module_get(pers->owner)) { 4460 spin_unlock(&pers_lock); 4461 if (mddev->level != LEVEL_NONE) 4462 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4463 mddev->level); 4464 else 4465 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4466 mddev->clevel); 4467 return -EINVAL; 4468 } 4469 mddev->pers = pers; 4470 spin_unlock(&pers_lock); 4471 if (mddev->level != pers->level) { 4472 mddev->level = pers->level; 4473 mddev->new_level = pers->level; 4474 } 4475 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4476 4477 if (mddev->reshape_position != MaxSector && 4478 pers->start_reshape == NULL) { 4479 /* This personality cannot handle reshaping... */ 4480 mddev->pers = NULL; 4481 module_put(pers->owner); 4482 return -EINVAL; 4483 } 4484 4485 if (pers->sync_request) { 4486 /* Warn if this is a potentially silly 4487 * configuration. 
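 * ("Silly" here means two array members that sit on the same
 * underlying physical disk - e.g. two partitions of one drive,
 * detected below by comparing bd_contains - since redundancy
 * against a single-disk failure is then largely illusory.)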
4488 */ 4489 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4490 mdk_rdev_t *rdev2; 4491 int warned = 0; 4492 4493 list_for_each_entry(rdev, &mddev->disks, same_set) 4494 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4495 if (rdev < rdev2 && 4496 rdev->bdev->bd_contains == 4497 rdev2->bdev->bd_contains) { 4498 printk(KERN_WARNING 4499 "%s: WARNING: %s appears to be" 4500 " on the same physical disk as" 4501 " %s.\n", 4502 mdname(mddev), 4503 bdevname(rdev->bdev,b), 4504 bdevname(rdev2->bdev,b2)); 4505 warned = 1; 4506 } 4507 } 4508 4509 if (warned) 4510 printk(KERN_WARNING 4511 "True protection against single-disk" 4512 " failure might be compromised.\n"); 4513 } 4514 4515 mddev->recovery = 0; 4516 /* may be over-ridden by personality */ 4517 mddev->resync_max_sectors = mddev->dev_sectors; 4518 4519 mddev->ok_start_degraded = start_dirty_degraded; 4520 4521 if (start_readonly && mddev->ro == 0) 4522 mddev->ro = 2; /* read-only, but switch on first write */ 4523 4524 err = mddev->pers->run(mddev); 4525 if (err) 4526 printk(KERN_ERR "md: pers->run() failed ...\n"); 4527 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4528 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4529 " but 'external_size' not in effect?\n", __func__); 4530 printk(KERN_ERR 4531 "md: invalid array_size %llu > default size %llu\n", 4532 (unsigned long long)mddev->array_sectors / 2, 4533 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4534 err = -EINVAL; 4535 mddev->pers->stop(mddev); 4536 } 4537 if (err == 0 && mddev->pers->sync_request) { 4538 err = bitmap_create(mddev); 4539 if (err) { 4540 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4541 mdname(mddev), err); 4542 mddev->pers->stop(mddev); 4543 } 4544 } 4545 if (err) { 4546 module_put(mddev->pers->owner); 4547 mddev->pers = NULL; 4548 bitmap_destroy(mddev); 4549 return err; 4550 } 4551 if (mddev->pers->sync_request) { 4552 if (mddev->kobj.sd && 4553 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4554 printk(KERN_WARNING 4555 "md: cannot register extra attributes for %s\n", 4556 mdname(mddev)); 4557 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 4558 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4559 mddev->ro = 0; 4560 4561 atomic_set(&mddev->writes_pending,0); 4562 atomic_set(&mddev->max_corr_read_errors, 4563 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 4564 mddev->safemode = 0; 4565 mddev->safemode_timer.function = md_safemode_timeout; 4566 mddev->safemode_timer.data = (unsigned long) mddev; 4567 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4568 mddev->in_sync = 1; 4569 4570 list_for_each_entry(rdev, &mddev->disks, same_set) 4571 if (rdev->raid_disk >= 0) { 4572 char nm[20]; 4573 sprintf(nm, "rd%d", rdev->raid_disk); 4574 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 4575 /* failure here is OK */; 4576 } 4577 4578 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4579 4580 if (mddev->flags) 4581 md_update_sb(mddev, 0); 4582 4583 md_wakeup_thread(mddev->thread); 4584 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4585 4586 md_new_event(mddev); 4587 sysfs_notify_dirent_safe(mddev->sysfs_state); 4588 sysfs_notify_dirent_safe(mddev->sysfs_action); 4589 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4590 return 0; 4591 } 4592 EXPORT_SYMBOL_GPL(md_run); 4593 4594 static int do_md_run(mddev_t *mddev) 4595 { 4596 int err; 4597 4598 err = md_run(mddev); 4599 if (err) 4600 goto out; 4601 err = bitmap_load(mddev); 4602 if (err) 
{ 4603 bitmap_destroy(mddev); 4604 goto out; 4605 } 4606 set_capacity(mddev->gendisk, mddev->array_sectors); 4607 revalidate_disk(mddev->gendisk); 4608 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4609 out: 4610 return err; 4611 } 4612 4613 static int restart_array(mddev_t *mddev) 4614 { 4615 struct gendisk *disk = mddev->gendisk; 4616 4617 /* Complain if it has no devices */ 4618 if (list_empty(&mddev->disks)) 4619 return -ENXIO; 4620 if (!mddev->pers) 4621 return -EINVAL; 4622 if (!mddev->ro) 4623 return -EBUSY; 4624 mddev->safemode = 0; 4625 mddev->ro = 0; 4626 set_disk_ro(disk, 0); 4627 printk(KERN_INFO "md: %s switched to read-write mode.\n", 4628 mdname(mddev)); 4629 /* Kick recovery or resync if necessary */ 4630 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4631 md_wakeup_thread(mddev->thread); 4632 md_wakeup_thread(mddev->sync_thread); 4633 sysfs_notify_dirent_safe(mddev->sysfs_state); 4634 return 0; 4635 } 4636 4637 /* similar to deny_write_access, but accounts for our holding a reference 4638 * to the file ourselves */ 4639 static int deny_bitmap_write_access(struct file * file) 4640 { 4641 struct inode *inode = file->f_mapping->host; 4642 4643 spin_lock(&inode->i_lock); 4644 if (atomic_read(&inode->i_writecount) > 1) { 4645 spin_unlock(&inode->i_lock); 4646 return -ETXTBSY; 4647 } 4648 atomic_set(&inode->i_writecount, -1); 4649 spin_unlock(&inode->i_lock); 4650 4651 return 0; 4652 } 4653 4654 void restore_bitmap_write_access(struct file *file) 4655 { 4656 struct inode *inode = file->f_mapping->host; 4657 4658 spin_lock(&inode->i_lock); 4659 atomic_set(&inode->i_writecount, 1); 4660 spin_unlock(&inode->i_lock); 4661 } 4662 4663 static void md_clean(mddev_t *mddev) 4664 { 4665 mddev->array_sectors = 0; 4666 mddev->external_size = 0; 4667 mddev->dev_sectors = 0; 4668 mddev->raid_disks = 0; 4669 mddev->recovery_cp = 0; 4670 mddev->resync_min = 0; 4671 mddev->resync_max = MaxSector; 4672 mddev->reshape_position = MaxSector; 4673 mddev->external = 0; 4674 mddev->persistent = 0; 4675 mddev->level = LEVEL_NONE; 4676 mddev->clevel[0] = 0; 4677 mddev->flags = 0; 4678 mddev->ro = 0; 4679 mddev->metadata_type[0] = 0; 4680 mddev->chunk_sectors = 0; 4681 mddev->ctime = mddev->utime = 0; 4682 mddev->layout = 0; 4683 mddev->max_disks = 0; 4684 mddev->events = 0; 4685 mddev->can_decrease_events = 0; 4686 mddev->delta_disks = 0; 4687 mddev->new_level = LEVEL_NONE; 4688 mddev->new_layout = 0; 4689 mddev->new_chunk_sectors = 0; 4690 mddev->curr_resync = 0; 4691 mddev->resync_mismatches = 0; 4692 mddev->suspend_lo = mddev->suspend_hi = 0; 4693 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4694 mddev->recovery = 0; 4695 mddev->in_sync = 0; 4696 mddev->degraded = 0; 4697 mddev->safemode = 0; 4698 mddev->bitmap_info.offset = 0; 4699 mddev->bitmap_info.default_offset = 0; 4700 mddev->bitmap_info.chunksize = 0; 4701 mddev->bitmap_info.daemon_sleep = 0; 4702 mddev->bitmap_info.max_write_behind = 0; 4703 mddev->plug = NULL; 4704 } 4705 4706 void md_stop_writes(mddev_t *mddev) 4707 { 4708 if (mddev->sync_thread) { 4709 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4710 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4711 md_unregister_thread(mddev->sync_thread); 4712 mddev->sync_thread = NULL; 4713 } 4714 4715 del_timer_sync(&mddev->safemode_timer); 4716 4717 bitmap_flush(mddev); 4718 md_super_wait(mddev); 4719 4720 if (!mddev->in_sync || mddev->flags) { 4721 /* mark array as shutdown cleanly */ 4722 mddev->in_sync = 1; 4723 md_update_sb(mddev, 1); 4724 } 4725 } 4726 
EXPORT_SYMBOL_GPL(md_stop_writes); 4727 4728 void md_stop(mddev_t *mddev) 4729 { 4730 mddev->pers->stop(mddev); 4731 if (mddev->pers->sync_request && mddev->to_remove == NULL) 4732 mddev->to_remove = &md_redundancy_group; 4733 module_put(mddev->pers->owner); 4734 mddev->pers = NULL; 4735 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4736 } 4737 EXPORT_SYMBOL_GPL(md_stop); 4738 4739 static int md_set_readonly(mddev_t *mddev, int is_open) 4740 { 4741 int err = 0; 4742 mutex_lock(&mddev->open_mutex); 4743 if (atomic_read(&mddev->openers) > is_open) { 4744 printk("md: %s still in use.\n",mdname(mddev)); 4745 err = -EBUSY; 4746 goto out; 4747 } 4748 if (mddev->pers) { 4749 md_stop_writes(mddev); 4750 4751 err = -ENXIO; 4752 if (mddev->ro==1) 4753 goto out; 4754 mddev->ro = 1; 4755 set_disk_ro(mddev->gendisk, 1); 4756 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4757 sysfs_notify_dirent_safe(mddev->sysfs_state); 4758 err = 0; 4759 } 4760 out: 4761 mutex_unlock(&mddev->open_mutex); 4762 return err; 4763 } 4764 4765 /* mode: 4766 * 0 - completely stop and dis-assemble array 4767 * 2 - stop but do not disassemble array 4768 */ 4769 static int do_md_stop(mddev_t * mddev, int mode, int is_open) 4770 { 4771 struct gendisk *disk = mddev->gendisk; 4772 mdk_rdev_t *rdev; 4773 4774 mutex_lock(&mddev->open_mutex); 4775 if (atomic_read(&mddev->openers) > is_open || 4776 mddev->sysfs_active) { 4777 printk("md: %s still in use.\n",mdname(mddev)); 4778 mutex_unlock(&mddev->open_mutex); 4779 return -EBUSY; 4780 } 4781 4782 if (mddev->pers) { 4783 if (mddev->ro) 4784 set_disk_ro(disk, 0); 4785 4786 md_stop_writes(mddev); 4787 md_stop(mddev); 4788 mddev->queue->merge_bvec_fn = NULL; 4789 mddev->queue->unplug_fn = NULL; 4790 mddev->queue->backing_dev_info.congested_fn = NULL; 4791 4792 /* tell userspace to handle 'inactive' */ 4793 sysfs_notify_dirent_safe(mddev->sysfs_state); 4794 4795 list_for_each_entry(rdev, &mddev->disks, same_set) 4796 if (rdev->raid_disk >= 0) { 4797 char nm[20]; 4798 sprintf(nm, "rd%d", rdev->raid_disk); 4799 sysfs_remove_link(&mddev->kobj, nm); 4800 } 4801 4802 set_capacity(disk, 0); 4803 mutex_unlock(&mddev->open_mutex); 4804 revalidate_disk(disk); 4805 4806 if (mddev->ro) 4807 mddev->ro = 0; 4808 } else 4809 mutex_unlock(&mddev->open_mutex); 4810 /* 4811 * Free resources if final stop 4812 */ 4813 if (mode == 0) { 4814 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 4815 4816 bitmap_destroy(mddev); 4817 if (mddev->bitmap_info.file) { 4818 restore_bitmap_write_access(mddev->bitmap_info.file); 4819 fput(mddev->bitmap_info.file); 4820 mddev->bitmap_info.file = NULL; 4821 } 4822 mddev->bitmap_info.offset = 0; 4823 4824 export_array(mddev); 4825 4826 md_clean(mddev); 4827 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4828 if (mddev->hold_active == UNTIL_STOP) 4829 mddev->hold_active = 0; 4830 } 4831 blk_integrity_unregister(disk); 4832 md_new_event(mddev); 4833 sysfs_notify_dirent_safe(mddev->sysfs_state); 4834 return 0; 4835 } 4836 4837 #ifndef MODULE 4838 static void autorun_array(mddev_t *mddev) 4839 { 4840 mdk_rdev_t *rdev; 4841 int err; 4842 4843 if (list_empty(&mddev->disks)) 4844 return; 4845 4846 printk(KERN_INFO "md: running: "); 4847 4848 list_for_each_entry(rdev, &mddev->disks, same_set) { 4849 char b[BDEVNAME_SIZE]; 4850 printk("<%s>", bdevname(rdev->bdev,b)); 4851 } 4852 printk("\n"); 4853 4854 err = do_md_run(mddev); 4855 if (err) { 4856 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 4857 do_md_stop(mddev, 0, 0); 4858 } 4859 } 4860 
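/* pending_raid_disks is filled by autostart_arrays() - at boot for
 * partitions tagged for RAID autodetect, or via the RAID_AUTORUN
 * ioctl handled in md_ioctl() below. autorun_devices() groups those
 * disks by superblock UUID and tries to assemble each group.
 */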
4861 /* 4862 * let's try to run arrays based on all disks that have arrived 4863 * until now. (those are in pending_raid_disks) 4864 * 4865 * the method: pick the first pending disk, collect all disks with 4866 * the same UUID, remove all from the pending list and put them into 4867 * the 'same_array' list. Then order this list based on superblock 4868 * update time (freshest comes first), kick out 'old' disks and 4869 * compare superblocks. If everything's fine then run it. 4870 * 4871 * If "unit" is allocated, then bump its reference count 4872 */ 4873 static void autorun_devices(int part) 4874 { 4875 mdk_rdev_t *rdev0, *rdev, *tmp; 4876 mddev_t *mddev; 4877 char b[BDEVNAME_SIZE]; 4878 4879 printk(KERN_INFO "md: autorun ...\n"); 4880 while (!list_empty(&pending_raid_disks)) { 4881 int unit; 4882 dev_t dev; 4883 LIST_HEAD(candidates); 4884 rdev0 = list_entry(pending_raid_disks.next, 4885 mdk_rdev_t, same_set); 4886 4887 printk(KERN_INFO "md: considering %s ...\n", 4888 bdevname(rdev0->bdev,b)); 4889 INIT_LIST_HEAD(&candidates); 4890 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 4891 if (super_90_load(rdev, rdev0, 0) >= 0) { 4892 printk(KERN_INFO "md: adding %s ...\n", 4893 bdevname(rdev->bdev,b)); 4894 list_move(&rdev->same_set, &candidates); 4895 } 4896 /* 4897 * now we have a set of devices, with all of them having 4898 * mostly sane superblocks. It's time to allocate the 4899 * mddev. 4900 */ 4901 if (part) { 4902 dev = MKDEV(mdp_major, 4903 rdev0->preferred_minor << MdpMinorShift); 4904 unit = MINOR(dev) >> MdpMinorShift; 4905 } else { 4906 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 4907 unit = MINOR(dev); 4908 } 4909 if (rdev0->preferred_minor != unit) { 4910 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 4911 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 4912 break; 4913 } 4914 4915 md_probe(dev, NULL, NULL); 4916 mddev = mddev_find(dev); 4917 if (!mddev || !mddev->gendisk) { 4918 if (mddev) 4919 mddev_put(mddev); 4920 printk(KERN_ERR 4921 "md: cannot allocate memory for md drive.\n"); 4922 break; 4923 } 4924 if (mddev_lock(mddev)) 4925 printk(KERN_WARNING "md: %s locked, cannot run\n", 4926 mdname(mddev)); 4927 else if (mddev->raid_disks || mddev->major_version 4928 || !list_empty(&mddev->disks)) { 4929 printk(KERN_WARNING 4930 "md: %s already running, cannot run %s\n", 4931 mdname(mddev), bdevname(rdev0->bdev,b)); 4932 mddev_unlock(mddev); 4933 } else { 4934 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 4935 mddev->persistent = 1; 4936 rdev_for_each_list(rdev, tmp, &candidates) { 4937 list_del_init(&rdev->same_set); 4938 if (bind_rdev_to_array(rdev, mddev)) 4939 export_rdev(rdev); 4940 } 4941 autorun_array(mddev); 4942 mddev_unlock(mddev); 4943 } 4944 /* on success, candidates will be empty, on error 4945 * it won't... 4946 */ 4947 rdev_for_each_list(rdev, tmp, &candidates) { 4948 list_del_init(&rdev->same_set); 4949 export_rdev(rdev); 4950 } 4951 mddev_put(mddev); 4952 } 4953 printk(KERN_INFO "md: ...
autorun DONE.\n"); 4954 } 4955 #endif /* !MODULE */ 4956 4957 static int get_version(void __user * arg) 4958 { 4959 mdu_version_t ver; 4960 4961 ver.major = MD_MAJOR_VERSION; 4962 ver.minor = MD_MINOR_VERSION; 4963 ver.patchlevel = MD_PATCHLEVEL_VERSION; 4964 4965 if (copy_to_user(arg, &ver, sizeof(ver))) 4966 return -EFAULT; 4967 4968 return 0; 4969 } 4970 4971 static int get_array_info(mddev_t * mddev, void __user * arg) 4972 { 4973 mdu_array_info_t info; 4974 int nr,working,insync,failed,spare; 4975 mdk_rdev_t *rdev; 4976 4977 nr=working=insync=failed=spare=0; 4978 list_for_each_entry(rdev, &mddev->disks, same_set) { 4979 nr++; 4980 if (test_bit(Faulty, &rdev->flags)) 4981 failed++; 4982 else { 4983 working++; 4984 if (test_bit(In_sync, &rdev->flags)) 4985 insync++; 4986 else 4987 spare++; 4988 } 4989 } 4990 4991 info.major_version = mddev->major_version; 4992 info.minor_version = mddev->minor_version; 4993 info.patch_version = MD_PATCHLEVEL_VERSION; 4994 info.ctime = mddev->ctime; 4995 info.level = mddev->level; 4996 info.size = mddev->dev_sectors / 2; 4997 if (info.size != mddev->dev_sectors / 2) /* overflow */ 4998 info.size = -1; 4999 info.nr_disks = nr; 5000 info.raid_disks = mddev->raid_disks; 5001 info.md_minor = mddev->md_minor; 5002 info.not_persistent= !mddev->persistent; 5003 5004 info.utime = mddev->utime; 5005 info.state = 0; 5006 if (mddev->in_sync) 5007 info.state = (1<<MD_SB_CLEAN); 5008 if (mddev->bitmap && mddev->bitmap_info.offset) 5009 info.state |= (1<<MD_SB_BITMAP_PRESENT); 5010 info.active_disks = insync; 5011 info.working_disks = working; 5012 info.failed_disks = failed; 5013 info.spare_disks = spare; 5014 5015 info.layout = mddev->layout; 5016 info.chunk_size = mddev->chunk_sectors << 9; 5017 5018 if (copy_to_user(arg, &info, sizeof(info))) 5019 return -EFAULT; 5020 5021 return 0; 5022 } 5023 5024 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 5025 { 5026 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5027 char *ptr, *buf = NULL; 5028 int err = -ENOMEM; 5029 5030 if (md_allow_write(mddev)) 5031 file = kmalloc(sizeof(*file), GFP_NOIO); 5032 else 5033 file = kmalloc(sizeof(*file), GFP_KERNEL); 5034 5035 if (!file) 5036 goto out; 5037 5038 /* bitmap disabled, zero the first byte and copy out */ 5039 if (!mddev->bitmap || !mddev->bitmap->file) { 5040 file->pathname[0] = '\0'; 5041 goto copy_out; 5042 } 5043 5044 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 5045 if (!buf) 5046 goto out; 5047 5048 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 5049 if (IS_ERR(ptr)) 5050 goto out; 5051 5052 strcpy(file->pathname, ptr); 5053 5054 copy_out: 5055 err = 0; 5056 if (copy_to_user(arg, file, sizeof(*file))) 5057 err = -EFAULT; 5058 out: 5059 kfree(buf); 5060 kfree(file); 5061 return err; 5062 } 5063 5064 static int get_disk_info(mddev_t * mddev, void __user * arg) 5065 { 5066 mdu_disk_info_t info; 5067 mdk_rdev_t *rdev; 5068 5069 if (copy_from_user(&info, arg, sizeof(info))) 5070 return -EFAULT; 5071 5072 rdev = find_rdev_nr(mddev, info.number); 5073 if (rdev) { 5074 info.major = MAJOR(rdev->bdev->bd_dev); 5075 info.minor = MINOR(rdev->bdev->bd_dev); 5076 info.raid_disk = rdev->raid_disk; 5077 info.state = 0; 5078 if (test_bit(Faulty, &rdev->flags)) 5079 info.state |= (1<<MD_DISK_FAULTY); 5080 else if (test_bit(In_sync, &rdev->flags)) { 5081 info.state |= (1<<MD_DISK_ACTIVE); 5082 info.state |= (1<<MD_DISK_SYNC); 5083 } 5084 if (test_bit(WriteMostly, &rdev->flags)) 5085 info.state |= (1<<MD_DISK_WRITEMOSTLY);
5086 } else { 5087 info.major = info.minor = 0; 5088 info.raid_disk = -1; 5089 info.state = (1<<MD_DISK_REMOVED); 5090 } 5091 5092 if (copy_to_user(arg, &info, sizeof(info))) 5093 return -EFAULT; 5094 5095 return 0; 5096 } 5097 5098 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 5099 { 5100 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5101 mdk_rdev_t *rdev; 5102 dev_t dev = MKDEV(info->major,info->minor); 5103 5104 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5105 return -EOVERFLOW; 5106 5107 if (!mddev->raid_disks) { 5108 int err; 5109 /* expecting a device which has a superblock */ 5110 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5111 if (IS_ERR(rdev)) { 5112 printk(KERN_WARNING 5113 "md: md_import_device returned %ld\n", 5114 PTR_ERR(rdev)); 5115 return PTR_ERR(rdev); 5116 } 5117 if (!list_empty(&mddev->disks)) { 5118 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 5119 mdk_rdev_t, same_set); 5120 err = super_types[mddev->major_version] 5121 .load_super(rdev, rdev0, mddev->minor_version); 5122 if (err < 0) { 5123 printk(KERN_WARNING 5124 "md: %s has different UUID to %s\n", 5125 bdevname(rdev->bdev,b), 5126 bdevname(rdev0->bdev,b2)); 5127 export_rdev(rdev); 5128 return -EINVAL; 5129 } 5130 } 5131 err = bind_rdev_to_array(rdev, mddev); 5132 if (err) 5133 export_rdev(rdev); 5134 return err; 5135 } 5136 5137 /* 5138 * add_new_disk can be used once the array is assembled 5139 * to add "hot spares". They must already have a superblock 5140 * written 5141 */ 5142 if (mddev->pers) { 5143 int err; 5144 if (!mddev->pers->hot_add_disk) { 5145 printk(KERN_WARNING 5146 "%s: personality does not support diskops!\n", 5147 mdname(mddev)); 5148 return -EINVAL; 5149 } 5150 if (mddev->persistent) 5151 rdev = md_import_device(dev, mddev->major_version, 5152 mddev->minor_version); 5153 else 5154 rdev = md_import_device(dev, -1, -1); 5155 if (IS_ERR(rdev)) { 5156 printk(KERN_WARNING 5157 "md: md_import_device returned %ld\n", 5158 PTR_ERR(rdev)); 5159 return PTR_ERR(rdev); 5160 } 5161 /* set save_raid_disk if appropriate */ 5162 if (!mddev->persistent) { 5163 if (info->state & (1<<MD_DISK_SYNC) && 5164 info->raid_disk < mddev->raid_disks) 5165 rdev->raid_disk = info->raid_disk; 5166 else 5167 rdev->raid_disk = -1; 5168 } else 5169 super_types[mddev->major_version]. 5170 validate_super(mddev, rdev); 5171 rdev->saved_raid_disk = rdev->raid_disk; 5172 5173 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5174 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5175 set_bit(WriteMostly, &rdev->flags); 5176 else 5177 clear_bit(WriteMostly, &rdev->flags); 5178 5179 rdev->raid_disk = -1; 5180 err = bind_rdev_to_array(rdev, mddev); 5181 if (!err && !mddev->pers->hot_remove_disk) { 5182 /* If there is hot_add_disk but no hot_remove_disk 5183 * then added disks are for geometry changes, 5184 * and should be added immediately. 5185 */ 5186 super_types[mddev->major_version].
5187 validate_super(mddev, rdev); 5188 err = mddev->pers->hot_add_disk(mddev, rdev); 5189 if (err) 5190 unbind_rdev_from_array(rdev); 5191 } 5192 if (err) 5193 export_rdev(rdev); 5194 else 5195 sysfs_notify_dirent_safe(rdev->sysfs_state); 5196 5197 md_update_sb(mddev, 1); 5198 if (mddev->degraded) 5199 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5200 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5201 md_wakeup_thread(mddev->thread); 5202 return err; 5203 } 5204 5205 /* otherwise, add_new_disk is only allowed 5206 * for major_version==0 superblocks 5207 */ 5208 if (mddev->major_version != 0) { 5209 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5210 mdname(mddev)); 5211 return -EINVAL; 5212 } 5213 5214 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5215 int err; 5216 rdev = md_import_device(dev, -1, 0); 5217 if (IS_ERR(rdev)) { 5218 printk(KERN_WARNING 5219 "md: error, md_import_device() returned %ld\n", 5220 PTR_ERR(rdev)); 5221 return PTR_ERR(rdev); 5222 } 5223 rdev->desc_nr = info->number; 5224 if (info->raid_disk < mddev->raid_disks) 5225 rdev->raid_disk = info->raid_disk; 5226 else 5227 rdev->raid_disk = -1; 5228 5229 if (rdev->raid_disk < mddev->raid_disks) 5230 if (info->state & (1<<MD_DISK_SYNC)) 5231 set_bit(In_sync, &rdev->flags); 5232 5233 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5234 set_bit(WriteMostly, &rdev->flags); 5235 5236 if (!mddev->persistent) { 5237 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5238 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5239 } else 5240 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5241 rdev->sectors = rdev->sb_start; 5242 5243 err = bind_rdev_to_array(rdev, mddev); 5244 if (err) { 5245 export_rdev(rdev); 5246 return err; 5247 } 5248 } 5249 5250 return 0; 5251 } 5252 5253 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 5254 { 5255 char b[BDEVNAME_SIZE]; 5256 mdk_rdev_t *rdev; 5257 5258 rdev = find_rdev(mddev, dev); 5259 if (!rdev) 5260 return -ENXIO; 5261 5262 if (rdev->raid_disk >= 0) 5263 goto busy; 5264 5265 kick_rdev_from_array(rdev); 5266 md_update_sb(mddev, 1); 5267 md_new_event(mddev); 5268 5269 return 0; 5270 busy: 5271 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 5272 bdevname(rdev->bdev,b), mdname(mddev)); 5273 return -EBUSY; 5274 } 5275 5276 static int hot_add_disk(mddev_t * mddev, dev_t dev) 5277 { 5278 char b[BDEVNAME_SIZE]; 5279 int err; 5280 mdk_rdev_t *rdev; 5281 5282 if (!mddev->pers) 5283 return -ENODEV; 5284 5285 if (mddev->major_version != 0) { 5286 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 5287 " version-0 superblocks.\n", 5288 mdname(mddev)); 5289 return -EINVAL; 5290 } 5291 if (!mddev->pers->hot_add_disk) { 5292 printk(KERN_WARNING 5293 "%s: personality does not support diskops!\n", 5294 mdname(mddev)); 5295 return -EINVAL; 5296 } 5297 5298 rdev = md_import_device(dev, -1, 0); 5299 if (IS_ERR(rdev)) { 5300 printk(KERN_WARNING 5301 "md: error, md_import_device() returned %ld\n", 5302 PTR_ERR(rdev)); 5303 return -EINVAL; 5304 } 5305 5306 if (mddev->persistent) 5307 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5308 else 5309 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5310 5311 rdev->sectors = rdev->sb_start; 5312 5313 if (test_bit(Faulty, &rdev->flags)) { 5314 printk(KERN_WARNING 5315 "md: can not hot-add faulty %s disk to %s!\n", 5316 bdevname(rdev->bdev,b), mdname(mddev)); 5317 err = -EINVAL; 5318 goto abort_export; 5319 } 5320 clear_bit(In_sync, &rdev->flags); 5321 rdev->desc_nr = -1; 5322 rdev->saved_raid_disk = -1; 5323 err = 
bind_rdev_to_array(rdev, mddev); 5324 if (err) 5325 goto abort_export; 5326 5327 /* 5328 * The rest had better be atomic, we can have disk failures 5329 * noticed in interrupt contexts ... 5330 */ 5331 5332 rdev->raid_disk = -1; 5333 5334 md_update_sb(mddev, 1); 5335 5336 /* 5337 * Kick recovery, maybe this spare has to be added to the 5338 * array immediately. 5339 */ 5340 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5341 md_wakeup_thread(mddev->thread); 5342 md_new_event(mddev); 5343 return 0; 5344 5345 abort_export: 5346 export_rdev(rdev); 5347 return err; 5348 } 5349 5350 static int set_bitmap_file(mddev_t *mddev, int fd) 5351 { 5352 int err; 5353 5354 if (mddev->pers) { 5355 if (!mddev->pers->quiesce) 5356 return -EBUSY; 5357 if (mddev->recovery || mddev->sync_thread) 5358 return -EBUSY; 5359 /* we should be able to change the bitmap.. */ 5360 } 5361 5362 5363 if (fd >= 0) { 5364 if (mddev->bitmap) 5365 return -EEXIST; /* cannot add when bitmap is present */ 5366 mddev->bitmap_info.file = fget(fd); 5367 5368 if (mddev->bitmap_info.file == NULL) { 5369 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 5370 mdname(mddev)); 5371 return -EBADF; 5372 } 5373 5374 err = deny_bitmap_write_access(mddev->bitmap_info.file); 5375 if (err) { 5376 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5377 mdname(mddev)); 5378 fput(mddev->bitmap_info.file); 5379 mddev->bitmap_info.file = NULL; 5380 return err; 5381 } 5382 mddev->bitmap_info.offset = 0; /* file overrides offset */ 5383 } else if (mddev->bitmap == NULL) 5384 return -ENOENT; /* cannot remove what isn't there */ 5385 err = 0; 5386 if (mddev->pers) { 5387 mddev->pers->quiesce(mddev, 1); 5388 if (fd >= 0) { 5389 err = bitmap_create(mddev); 5390 if (!err) 5391 err = bitmap_load(mddev); 5392 } 5393 if (fd < 0 || err) { 5394 bitmap_destroy(mddev); 5395 fd = -1; /* make sure to put the file */ 5396 } 5397 mddev->pers->quiesce(mddev, 0); 5398 } 5399 if (fd < 0) { 5400 if (mddev->bitmap_info.file) { 5401 restore_bitmap_write_access(mddev->bitmap_info.file); 5402 fput(mddev->bitmap_info.file); 5403 } 5404 mddev->bitmap_info.file = NULL; 5405 } 5406 5407 return err; 5408 } 5409 5410 /* 5411 * set_array_info is used in two different ways. 5412 * The original usage is when creating a new array. 5413 * In this usage, raid_disks is > 0 and it together with 5414 * level, size, not_persistent, layout and chunksize determine the 5415 * shape of the array. 5416 * This will always create an array with a type-0.90.0 superblock. 5417 * The newer usage is when assembling an array. 5418 * In this case raid_disks will be 0, and the major_version field is 5419 * used to determine which style superblocks are to be found on the devices. 5420 * The minor and patch _version numbers are also kept in case the 5421 * superblock handler wishes to interpret them. 5422 */ 5423 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 5424 { 5425 5426 if (info->raid_disks == 0) { 5427 /* just setting version number for superblock loading */ 5428 if (info->major_version < 0 || 5429 info->major_version >= ARRAY_SIZE(super_types) || 5430 super_types[info->major_version].name == NULL) { 5431 /* maybe try to auto-load a module?
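 * For now we just report it; super_types[] only has handlers
 * for the major versions it knows about (0.90 and 1.x), so
 * anything else fails with -EINVAL below.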
*/ 5432 printk(KERN_INFO 5433 "md: superblock version %d not known\n", 5434 info->major_version); 5435 return -EINVAL; 5436 } 5437 mddev->major_version = info->major_version; 5438 mddev->minor_version = info->minor_version; 5439 mddev->patch_version = info->patch_version; 5440 mddev->persistent = !info->not_persistent; 5441 /* ensure mddev_put doesn't delete this now that there 5442 * is some minimal configuration. 5443 */ 5444 mddev->ctime = get_seconds(); 5445 return 0; 5446 } 5447 mddev->major_version = MD_MAJOR_VERSION; 5448 mddev->minor_version = MD_MINOR_VERSION; 5449 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5450 mddev->ctime = get_seconds(); 5451 5452 mddev->level = info->level; 5453 mddev->clevel[0] = 0; 5454 mddev->dev_sectors = 2 * (sector_t)info->size; 5455 mddev->raid_disks = info->raid_disks; 5456 /* don't set md_minor, it is determined by which /dev/md* was 5457 * opened 5458 */ 5459 if (info->state & (1<<MD_SB_CLEAN)) 5460 mddev->recovery_cp = MaxSector; 5461 else 5462 mddev->recovery_cp = 0; 5463 mddev->persistent = ! info->not_persistent; 5464 mddev->external = 0; 5465 5466 mddev->layout = info->layout; 5467 mddev->chunk_sectors = info->chunk_size >> 9; 5468 5469 mddev->max_disks = MD_SB_DISKS; 5470 5471 if (mddev->persistent) 5472 mddev->flags = 0; 5473 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5474 5475 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 5476 mddev->bitmap_info.offset = 0; 5477 5478 mddev->reshape_position = MaxSector; 5479 5480 /* 5481 * Generate a 128 bit UUID 5482 */ 5483 get_random_bytes(mddev->uuid, 16); 5484 5485 mddev->new_level = mddev->level; 5486 mddev->new_chunk_sectors = mddev->chunk_sectors; 5487 mddev->new_layout = mddev->layout; 5488 mddev->delta_disks = 0; 5489 5490 return 0; 5491 } 5492 5493 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) 5494 { 5495 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5496 5497 if (mddev->external_size) 5498 return; 5499 5500 mddev->array_sectors = array_sectors; 5501 } 5502 EXPORT_SYMBOL(md_set_array_sectors); 5503 5504 static int update_size(mddev_t *mddev, sector_t num_sectors) 5505 { 5506 mdk_rdev_t *rdev; 5507 int rv; 5508 int fit = (num_sectors == 0); 5509 5510 if (mddev->pers->resize == NULL) 5511 return -EINVAL; 5512 /* The "num_sectors" is the number of sectors of each device that 5513 * is used. This can only make sense for arrays with redundancy. 5514 * linear and raid0 always use whatever space is available. We can only 5515 * consider changing this number if no resync or reconstruction is 5516 * happening, and if the new size is acceptable. It must fit before the 5517 * sb_start or, if that is <data_offset, it must fit before the size 5518 * of each device. If num_sectors is zero, we find the largest size 5519 * that fits. 5520 5521 */ 5522 if (mddev->sync_thread) 5523 return -EBUSY; 5524 if (mddev->bitmap) 5525 /* Sorry, cannot grow a bitmap yet, just remove it, 5526 * grow, and re-add.
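 * From userspace the equivalent sequence is roughly:
 *   mdadm --grow /dev/mdX --bitmap=none
 *   mdadm --grow /dev/mdX --size=max
 *   mdadm --grow /dev/mdX --bitmap=internal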
5527 */ 5528 return -EBUSY; 5529 list_for_each_entry(rdev, &mddev->disks, same_set) { 5530 sector_t avail = rdev->sectors; 5531 5532 if (fit && (num_sectors == 0 || num_sectors > avail)) 5533 num_sectors = avail; 5534 if (avail < num_sectors) 5535 return -ENOSPC; 5536 } 5537 rv = mddev->pers->resize(mddev, num_sectors); 5538 if (!rv) 5539 revalidate_disk(mddev->gendisk); 5540 return rv; 5541 } 5542 5543 static int update_raid_disks(mddev_t *mddev, int raid_disks) 5544 { 5545 int rv; 5546 /* change the number of raid disks */ 5547 if (mddev->pers->check_reshape == NULL) 5548 return -EINVAL; 5549 if (raid_disks <= 0 || 5550 (mddev->max_disks && raid_disks >= mddev->max_disks)) 5551 return -EINVAL; 5552 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5553 return -EBUSY; 5554 mddev->delta_disks = raid_disks - mddev->raid_disks; 5555 5556 rv = mddev->pers->check_reshape(mddev); 5557 return rv; 5558 } 5559 5560 5561 /* 5562 * update_array_info is used to change the configuration of an 5563 * on-line array. 5564 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 5565 * fields in the info are checked against the array. 5566 * Any differences that cannot be handled will cause an error. 5567 * Normally, only one change can be managed at a time. 5568 */ 5569 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 5570 { 5571 int rv = 0; 5572 int cnt = 0; 5573 int state = 0; 5574 5575 /* calculate expected state,ignoring low bits */ 5576 if (mddev->bitmap && mddev->bitmap_info.offset) 5577 state |= (1 << MD_SB_BITMAP_PRESENT); 5578 5579 if (mddev->major_version != info->major_version || 5580 mddev->minor_version != info->minor_version || 5581 /* mddev->patch_version != info->patch_version || */ 5582 mddev->ctime != info->ctime || 5583 mddev->level != info->level || 5584 /* mddev->layout != info->layout || */ 5585 !mddev->persistent != info->not_persistent|| 5586 mddev->chunk_sectors != info->chunk_size >> 9 || 5587 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5588 ((state^info->state) & 0xfffffe00) 5589 ) 5590 return -EINVAL; 5591 /* Check there is only one change */ 5592 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5593 cnt++; 5594 if (mddev->raid_disks != info->raid_disks) 5595 cnt++; 5596 if (mddev->layout != info->layout) 5597 cnt++; 5598 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5599 cnt++; 5600 if (cnt == 0) 5601 return 0; 5602 if (cnt > 1) 5603 return -EINVAL; 5604 5605 if (mddev->layout != info->layout) { 5606 /* Change layout 5607 * we don't need to do anything at the md level, the 5608 * personality will take care of it all. 
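 * We just record the request in ->new_layout and let
 * ->check_reshape() validate and apply it; on failure the
 * old layout is restored below.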
5609 */ 5610 if (mddev->pers->check_reshape == NULL) 5611 return -EINVAL; 5612 else { 5613 mddev->new_layout = info->layout; 5614 rv = mddev->pers->check_reshape(mddev); 5615 if (rv) 5616 mddev->new_layout = mddev->layout; 5617 return rv; 5618 } 5619 } 5620 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5621 rv = update_size(mddev, (sector_t)info->size * 2); 5622 5623 if (mddev->raid_disks != info->raid_disks) 5624 rv = update_raid_disks(mddev, info->raid_disks); 5625 5626 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 5627 if (mddev->pers->quiesce == NULL) 5628 return -EINVAL; 5629 if (mddev->recovery || mddev->sync_thread) 5630 return -EBUSY; 5631 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 5632 /* add the bitmap */ 5633 if (mddev->bitmap) 5634 return -EEXIST; 5635 if (mddev->bitmap_info.default_offset == 0) 5636 return -EINVAL; 5637 mddev->bitmap_info.offset = 5638 mddev->bitmap_info.default_offset; 5639 mddev->pers->quiesce(mddev, 1); 5640 rv = bitmap_create(mddev); 5641 if (!rv) 5642 rv = bitmap_load(mddev); 5643 if (rv) 5644 bitmap_destroy(mddev); 5645 mddev->pers->quiesce(mddev, 0); 5646 } else { 5647 /* remove the bitmap */ 5648 if (!mddev->bitmap) 5649 return -ENOENT; 5650 if (mddev->bitmap->file) 5651 return -EINVAL; 5652 mddev->pers->quiesce(mddev, 1); 5653 bitmap_destroy(mddev); 5654 mddev->pers->quiesce(mddev, 0); 5655 mddev->bitmap_info.offset = 0; 5656 } 5657 } 5658 md_update_sb(mddev, 1); 5659 return rv; 5660 } 5661 5662 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 5663 { 5664 mdk_rdev_t *rdev; 5665 5666 if (mddev->pers == NULL) 5667 return -ENODEV; 5668 5669 rdev = find_rdev(mddev, dev); 5670 if (!rdev) 5671 return -ENODEV; 5672 5673 md_error(mddev, rdev); 5674 return 0; 5675 } 5676 5677 /* 5678 * We have a problem here : there is no easy way to give a CHS 5679 * virtual geometry. We currently pretend that we have a 2 heads 5680 * 4 sectors (with a BIG number of cylinders...). This drives 5681 * dosfs just mad... 
;-) 5682 */ 5683 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 5684 { 5685 mddev_t *mddev = bdev->bd_disk->private_data; 5686 5687 geo->heads = 2; 5688 geo->sectors = 4; 5689 geo->cylinders = mddev->array_sectors / 8; 5690 return 0; 5691 } 5692 5693 static int md_ioctl(struct block_device *bdev, fmode_t mode, 5694 unsigned int cmd, unsigned long arg) 5695 { 5696 int err = 0; 5697 void __user *argp = (void __user *)arg; 5698 mddev_t *mddev = NULL; 5699 int ro; 5700 5701 if (!capable(CAP_SYS_ADMIN)) 5702 return -EACCES; 5703 5704 /* 5705 * Commands dealing with the RAID driver but not any 5706 * particular array: 5707 */ 5708 switch (cmd) 5709 { 5710 case RAID_VERSION: 5711 err = get_version(argp); 5712 goto done; 5713 5714 case PRINT_RAID_DEBUG: 5715 err = 0; 5716 md_print_devices(); 5717 goto done; 5718 5719 #ifndef MODULE 5720 case RAID_AUTORUN: 5721 err = 0; 5722 autostart_arrays(arg); 5723 goto done; 5724 #endif 5725 default:; 5726 } 5727 5728 /* 5729 * Commands creating/starting a new array: 5730 */ 5731 5732 mddev = bdev->bd_disk->private_data; 5733 5734 if (!mddev) { 5735 BUG(); 5736 goto abort; 5737 } 5738 5739 err = mddev_lock(mddev); 5740 if (err) { 5741 printk(KERN_INFO 5742 "md: ioctl lock interrupted, reason %d, cmd %d\n", 5743 err, cmd); 5744 goto abort; 5745 } 5746 5747 switch (cmd) 5748 { 5749 case SET_ARRAY_INFO: 5750 { 5751 mdu_array_info_t info; 5752 if (!arg) 5753 memset(&info, 0, sizeof(info)); 5754 else if (copy_from_user(&info, argp, sizeof(info))) { 5755 err = -EFAULT; 5756 goto abort_unlock; 5757 } 5758 if (mddev->pers) { 5759 err = update_array_info(mddev, &info); 5760 if (err) { 5761 printk(KERN_WARNING "md: couldn't update" 5762 " array info. %d\n", err); 5763 goto abort_unlock; 5764 } 5765 goto done_unlock; 5766 } 5767 if (!list_empty(&mddev->disks)) { 5768 printk(KERN_WARNING 5769 "md: array %s already has disks!\n", 5770 mdname(mddev)); 5771 err = -EBUSY; 5772 goto abort_unlock; 5773 } 5774 if (mddev->raid_disks) { 5775 printk(KERN_WARNING 5776 "md: array %s already initialised!\n", 5777 mdname(mddev)); 5778 err = -EBUSY; 5779 goto abort_unlock; 5780 } 5781 err = set_array_info(mddev, &info); 5782 if (err) { 5783 printk(KERN_WARNING "md: couldn't set" 5784 " array info. 
%d\n", err); 5785 goto abort_unlock; 5786 } 5787 } 5788 goto done_unlock; 5789 5790 default:; 5791 } 5792 5793 /* 5794 * Commands querying/configuring an existing array: 5795 */ 5796 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 5797 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 5798 if ((!mddev->raid_disks && !mddev->external) 5799 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 5800 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 5801 && cmd != GET_BITMAP_FILE) { 5802 err = -ENODEV; 5803 goto abort_unlock; 5804 } 5805 5806 /* 5807 * Commands even a read-only array can execute: 5808 */ 5809 switch (cmd) 5810 { 5811 case GET_ARRAY_INFO: 5812 err = get_array_info(mddev, argp); 5813 goto done_unlock; 5814 5815 case GET_BITMAP_FILE: 5816 err = get_bitmap_file(mddev, argp); 5817 goto done_unlock; 5818 5819 case GET_DISK_INFO: 5820 err = get_disk_info(mddev, argp); 5821 goto done_unlock; 5822 5823 case RESTART_ARRAY_RW: 5824 err = restart_array(mddev); 5825 goto done_unlock; 5826 5827 case STOP_ARRAY: 5828 err = do_md_stop(mddev, 0, 1); 5829 goto done_unlock; 5830 5831 case STOP_ARRAY_RO: 5832 err = md_set_readonly(mddev, 1); 5833 goto done_unlock; 5834 5835 case BLKROSET: 5836 if (get_user(ro, (int __user *)(arg))) { 5837 err = -EFAULT; 5838 goto done_unlock; 5839 } 5840 err = -EINVAL; 5841 5842 /* if the bdev is going readonly the value of mddev->ro 5843 * does not matter, no writes are coming 5844 */ 5845 if (ro) 5846 goto done_unlock; 5847 5848 /* are we are already prepared for writes? */ 5849 if (mddev->ro != 1) 5850 goto done_unlock; 5851 5852 /* transitioning to readauto need only happen for 5853 * arrays that call md_write_start 5854 */ 5855 if (mddev->pers) { 5856 err = restart_array(mddev); 5857 if (err == 0) { 5858 mddev->ro = 2; 5859 set_disk_ro(mddev->gendisk, 0); 5860 } 5861 } 5862 goto done_unlock; 5863 } 5864 5865 /* 5866 * The remaining ioctls are changing the state of the 5867 * superblock, so we do not allow them on read-only arrays. 5868 * However non-MD ioctls (e.g. get-size) will still come through 5869 * here and hit the 'default' below, so only disallow 5870 * 'md' ioctls, and switch to rw mode if started auto-readonly. 
5871 */ 5872 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 5873 if (mddev->ro == 2) { 5874 mddev->ro = 0; 5875 sysfs_notify_dirent_safe(mddev->sysfs_state); 5876 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5877 md_wakeup_thread(mddev->thread); 5878 } else { 5879 err = -EROFS; 5880 goto abort_unlock; 5881 } 5882 } 5883 5884 switch (cmd) 5885 { 5886 case ADD_NEW_DISK: 5887 { 5888 mdu_disk_info_t info; 5889 if (copy_from_user(&info, argp, sizeof(info))) 5890 err = -EFAULT; 5891 else 5892 err = add_new_disk(mddev, &info); 5893 goto done_unlock; 5894 } 5895 5896 case HOT_REMOVE_DISK: 5897 err = hot_remove_disk(mddev, new_decode_dev(arg)); 5898 goto done_unlock; 5899 5900 case HOT_ADD_DISK: 5901 err = hot_add_disk(mddev, new_decode_dev(arg)); 5902 goto done_unlock; 5903 5904 case SET_DISK_FAULTY: 5905 err = set_disk_faulty(mddev, new_decode_dev(arg)); 5906 goto done_unlock; 5907 5908 case RUN_ARRAY: 5909 err = do_md_run(mddev); 5910 goto done_unlock; 5911 5912 case SET_BITMAP_FILE: 5913 err = set_bitmap_file(mddev, (int)arg); 5914 goto done_unlock; 5915 5916 default: 5917 err = -EINVAL; 5918 goto abort_unlock; 5919 } 5920 5921 done_unlock: 5922 abort_unlock: 5923 if (mddev->hold_active == UNTIL_IOCTL && 5924 err != -EINVAL) 5925 mddev->hold_active = 0; 5926 mddev_unlock(mddev); 5927 5928 return err; 5929 done: 5930 if (err) 5931 MD_BUG(); 5932 abort: 5933 return err; 5934 } 5935 #ifdef CONFIG_COMPAT 5936 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 5937 unsigned int cmd, unsigned long arg) 5938 { 5939 switch (cmd) { 5940 case HOT_REMOVE_DISK: 5941 case HOT_ADD_DISK: 5942 case SET_DISK_FAULTY: 5943 case SET_BITMAP_FILE: 5944 /* These take in integer arg, do not convert */ 5945 break; 5946 default: 5947 arg = (unsigned long)compat_ptr(arg); 5948 break; 5949 } 5950 5951 return md_ioctl(bdev, mode, cmd, arg); 5952 } 5953 #endif /* CONFIG_COMPAT */ 5954 5955 static int md_open(struct block_device *bdev, fmode_t mode) 5956 { 5957 /* 5958 * Succeed if we can lock the mddev, which confirms that 5959 * it isn't being stopped right now. 5960 */ 5961 mddev_t *mddev = mddev_find(bdev->bd_dev); 5962 int err; 5963 5964 if (mddev->gendisk != bdev->bd_disk) { 5965 /* we are racing with mddev_put which is discarding this 5966 * bd_disk. 5967 */ 5968 mddev_put(mddev); 5969 /* Wait until bdev->bd_disk is definitely gone */ 5970 flush_workqueue(md_misc_wq); 5971 /* Then retry the open from the top */ 5972 return -ERESTARTSYS; 5973 } 5974 BUG_ON(mddev != bdev->bd_disk->private_data); 5975 5976 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 5977 goto out; 5978 5979 err = 0; 5980 atomic_inc(&mddev->openers); 5981 mutex_unlock(&mddev->open_mutex); 5982 5983 check_disk_size_change(mddev->gendisk, bdev); 5984 out: 5985 return err; 5986 } 5987 5988 static int md_release(struct gendisk *disk, fmode_t mode) 5989 { 5990 mddev_t *mddev = disk->private_data; 5991 5992 BUG_ON(!mddev); 5993 atomic_dec(&mddev->openers); 5994 mddev_put(mddev); 5995 5996 return 0; 5997 } 5998 static const struct block_device_operations md_fops = 5999 { 6000 .owner = THIS_MODULE, 6001 .open = md_open, 6002 .release = md_release, 6003 .ioctl = md_ioctl, 6004 #ifdef CONFIG_COMPAT 6005 .compat_ioctl = md_compat_ioctl, 6006 #endif 6007 .getgeo = md_getgeo, 6008 }; 6009 6010 static int md_thread(void * arg) 6011 { 6012 mdk_thread_t *thread = arg; 6013 6014 /* 6015 * md_thread is a 'system-thread', its priority should be very
We avoid resource deadlocks individually in each 6017 * raid personality. (RAID5 does preallocation) We also use RR and 6018 * the very same RT priority as kswapd, thus we will never get 6019 * into a priority inversion deadlock. 6020 * 6021 * we definitely have to have equal or higher priority than 6022 * bdflush, otherwise bdflush will deadlock if there are too 6023 * many dirty RAID5 blocks. 6024 */ 6025 6026 allow_signal(SIGKILL); 6027 while (!kthread_should_stop()) { 6028 6029 /* We need to wait INTERRUPTIBLE so that 6030 * we don't add to the load-average. 6031 * That means we need to be sure no signals are 6032 * pending 6033 */ 6034 if (signal_pending(current)) 6035 flush_signals(current); 6036 6037 wait_event_interruptible_timeout 6038 (thread->wqueue, 6039 test_bit(THREAD_WAKEUP, &thread->flags) 6040 || kthread_should_stop(), 6041 thread->timeout); 6042 6043 clear_bit(THREAD_WAKEUP, &thread->flags); 6044 6045 thread->run(thread->mddev); 6046 } 6047 6048 return 0; 6049 } 6050 6051 void md_wakeup_thread(mdk_thread_t *thread) 6052 { 6053 if (thread) { 6054 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 6055 set_bit(THREAD_WAKEUP, &thread->flags); 6056 wake_up(&thread->wqueue); 6057 } 6058 } 6059 6060 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 6061 const char *name) 6062 { 6063 mdk_thread_t *thread; 6064 6065 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 6066 if (!thread) 6067 return NULL; 6068 6069 init_waitqueue_head(&thread->wqueue); 6070 6071 thread->run = run; 6072 thread->mddev = mddev; 6073 thread->timeout = MAX_SCHEDULE_TIMEOUT; 6074 thread->tsk = kthread_run(md_thread, thread, 6075 "%s_%s", 6076 mdname(thread->mddev), 6077 name ?: mddev->pers->name); 6078 if (IS_ERR(thread->tsk)) { 6079 kfree(thread); 6080 return NULL; 6081 } 6082 return thread; 6083 } 6084 6085 void md_unregister_thread(mdk_thread_t *thread) 6086 { 6087 if (!thread) 6088 return; 6089 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 6090 6091 kthread_stop(thread->tsk); 6092 kfree(thread); 6093 } 6094 6095 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 6096 { 6097 if (!mddev) { 6098 MD_BUG(); 6099 return; 6100 } 6101 6102 if (!rdev || test_bit(Faulty, &rdev->flags)) 6103 return; 6104 6105 if (mddev->external) 6106 set_bit(Blocked, &rdev->flags); 6107 /* 6108 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 6109 mdname(mddev), 6110 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 6111 __builtin_return_address(0),__builtin_return_address(1), 6112 __builtin_return_address(2),__builtin_return_address(3)); 6113 */ 6114 if (!mddev->pers) 6115 return; 6116 if (!mddev->pers->error_handler) 6117 return; 6118 mddev->pers->error_handler(mddev,rdev); 6119 if (mddev->degraded) 6120 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6121 sysfs_notify_dirent_safe(rdev->sysfs_state); 6122 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6123 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6124 md_wakeup_thread(mddev->thread); 6125 if (mddev->event_work.func) 6126 queue_work(md_misc_wq, &mddev->event_work); 6127 md_new_event_inintr(mddev); 6128 } 6129 6130 /* seq_file implementation /proc/mdstat */ 6131 6132 static void status_unused(struct seq_file *seq) 6133 { 6134 int i = 0; 6135 mdk_rdev_t *rdev; 6136 6137 seq_printf(seq, "unused devices: "); 6138 6139 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 6140 char b[BDEVNAME_SIZE]; 6141 i++; 6142 seq_printf(seq, "%s ", 6143 bdevname(rdev->bdev,b)); 6144 } 6145 if (!i) 6146 
seq_printf(seq, "<none>"); 6147 6148 seq_printf(seq, "\n"); 6149 } 6150 6151 6152 static void status_resync(struct seq_file *seq, mddev_t * mddev) 6153 { 6154 sector_t max_sectors, resync, res; 6155 unsigned long dt, db; 6156 sector_t rt; 6157 int scale; 6158 unsigned int per_milli; 6159 6160 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 6161 6162 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 6163 max_sectors = mddev->resync_max_sectors; 6164 else 6165 max_sectors = mddev->dev_sectors; 6166 6167 /* 6168 * Should not happen. 6169 */ 6170 if (!max_sectors) { 6171 MD_BUG(); 6172 return; 6173 } 6174 /* Pick 'scale' such that (resync>>scale)*1000 will fit 6175 * in a sector_t, and (max_sectors>>scale) will fit in a 6176 * u32, as those are the requirements for sector_div. 6177 * Thus 'scale' must be at least 10 6178 */ 6179 scale = 10; 6180 if (sizeof(sector_t) > sizeof(unsigned long)) { 6181 while ( max_sectors/2 > (1ULL<<(scale+32))) 6182 scale++; 6183 } 6184 res = (resync>>scale)*1000; 6185 sector_div(res, (u32)((max_sectors>>scale)+1)); 6186 6187 per_milli = res; 6188 { 6189 int i, x = per_milli/50, y = 20-x; 6190 seq_printf(seq, "["); 6191 for (i = 0; i < x; i++) 6192 seq_printf(seq, "="); 6193 seq_printf(seq, ">"); 6194 for (i = 0; i < y; i++) 6195 seq_printf(seq, "."); 6196 seq_printf(seq, "] "); 6197 } 6198 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 6199 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 6200 "reshape" : 6201 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 6202 "check" : 6203 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 6204 "resync" : "recovery"))), 6205 per_milli/10, per_milli % 10, 6206 (unsigned long long) resync/2, 6207 (unsigned long long) max_sectors/2); 6208 6209 /* 6210 * dt: time from mark until now 6211 * db: blocks written from mark until now 6212 * rt: remaining time 6213 * 6214 * rt is a sector_t, so could be 32bit or 64bit. 6215 * So we divide before multiply in case it is 32bit and close 6216 * to the limit. 6217 * We scale the divisor (db) by 32 to avoid losing precision 6218 * near the end of resync when the number of remaining sectors 6219 * is close to 'db'. 6220 * We then divide rt by 32 after multiplying by db to compensate. 6221 * The '+1' avoids division by zero if db is very small.
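 * (Worked example with illustrative numbers: if dt = 30 seconds and
 * db = 61440 sectors were written since the mark, the reported rate is
 * db/2/dt = 1024K/sec; with max_sectors - resync = 10,000,000 sectors
 * still to go, rt = ((10,000,000 / (61440/32 + 1)) * 30) >> 5 = ~4879
 * seconds, consistent with 5,000,000 KB remaining at ~1024K/sec.)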
6222 */ 6223 dt = ((jiffies - mddev->resync_mark) / HZ); 6224 if (!dt) dt++; 6225 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 6226 - mddev->resync_mark_cnt; 6227 6228 rt = max_sectors - resync; /* number of remaining sectors */ 6229 sector_div(rt, db/32+1); 6230 rt *= dt; 6231 rt >>= 5; 6232 6233 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 6234 ((unsigned long)rt % 60)/6); 6235 6236 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 6237 } 6238 6239 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 6240 { 6241 struct list_head *tmp; 6242 loff_t l = *pos; 6243 mddev_t *mddev; 6244 6245 if (l >= 0x10000) 6246 return NULL; 6247 if (!l--) 6248 /* header */ 6249 return (void*)1; 6250 6251 spin_lock(&all_mddevs_lock); 6252 list_for_each(tmp,&all_mddevs) 6253 if (!l--) { 6254 mddev = list_entry(tmp, mddev_t, all_mddevs); 6255 mddev_get(mddev); 6256 spin_unlock(&all_mddevs_lock); 6257 return mddev; 6258 } 6259 spin_unlock(&all_mddevs_lock); 6260 if (!l--) 6261 return (void*)2;/* tail */ 6262 return NULL; 6263 } 6264 6265 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 6266 { 6267 struct list_head *tmp; 6268 mddev_t *next_mddev, *mddev = v; 6269 6270 ++*pos; 6271 if (v == (void*)2) 6272 return NULL; 6273 6274 spin_lock(&all_mddevs_lock); 6275 if (v == (void*)1) 6276 tmp = all_mddevs.next; 6277 else 6278 tmp = mddev->all_mddevs.next; 6279 if (tmp != &all_mddevs) 6280 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 6281 else { 6282 next_mddev = (void*)2; 6283 *pos = 0x10000; 6284 } 6285 spin_unlock(&all_mddevs_lock); 6286 6287 if (v != (void*)1) 6288 mddev_put(mddev); 6289 return next_mddev; 6290 6291 } 6292 6293 static void md_seq_stop(struct seq_file *seq, void *v) 6294 { 6295 mddev_t *mddev = v; 6296 6297 if (mddev && v != (void*)1 && v != (void*)2) 6298 mddev_put(mddev); 6299 } 6300 6301 struct mdstat_info { 6302 int event; 6303 }; 6304 6305 static int md_seq_show(struct seq_file *seq, void *v) 6306 { 6307 mddev_t *mddev = v; 6308 sector_t sectors; 6309 mdk_rdev_t *rdev; 6310 struct mdstat_info *mi = seq->private; 6311 struct bitmap *bitmap; 6312 6313 if (v == (void*)1) { 6314 struct mdk_personality *pers; 6315 seq_printf(seq, "Personalities : "); 6316 spin_lock(&pers_lock); 6317 list_for_each_entry(pers, &pers_list, list) 6318 seq_printf(seq, "[%s] ", pers->name); 6319 6320 spin_unlock(&pers_lock); 6321 seq_printf(seq, "\n"); 6322 mi->event = atomic_read(&md_event_count); 6323 return 0; 6324 } 6325 if (v == (void*)2) { 6326 status_unused(seq); 6327 return 0; 6328 } 6329 6330 if (mddev_lock(mddev) < 0) 6331 return -EINTR; 6332 6333 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 6334 seq_printf(seq, "%s : %sactive", mdname(mddev), 6335 mddev->pers ? 
"" : "in"); 6336 if (mddev->pers) { 6337 if (mddev->ro==1) 6338 seq_printf(seq, " (read-only)"); 6339 if (mddev->ro==2) 6340 seq_printf(seq, " (auto-read-only)"); 6341 seq_printf(seq, " %s", mddev->pers->name); 6342 } 6343 6344 sectors = 0; 6345 list_for_each_entry(rdev, &mddev->disks, same_set) { 6346 char b[BDEVNAME_SIZE]; 6347 seq_printf(seq, " %s[%d]", 6348 bdevname(rdev->bdev,b), rdev->desc_nr); 6349 if (test_bit(WriteMostly, &rdev->flags)) 6350 seq_printf(seq, "(W)"); 6351 if (test_bit(Faulty, &rdev->flags)) { 6352 seq_printf(seq, "(F)"); 6353 continue; 6354 } else if (rdev->raid_disk < 0) 6355 seq_printf(seq, "(S)"); /* spare */ 6356 sectors += rdev->sectors; 6357 } 6358 6359 if (!list_empty(&mddev->disks)) { 6360 if (mddev->pers) 6361 seq_printf(seq, "\n %llu blocks", 6362 (unsigned long long) 6363 mddev->array_sectors / 2); 6364 else 6365 seq_printf(seq, "\n %llu blocks", 6366 (unsigned long long)sectors / 2); 6367 } 6368 if (mddev->persistent) { 6369 if (mddev->major_version != 0 || 6370 mddev->minor_version != 90) { 6371 seq_printf(seq," super %d.%d", 6372 mddev->major_version, 6373 mddev->minor_version); 6374 } 6375 } else if (mddev->external) 6376 seq_printf(seq, " super external:%s", 6377 mddev->metadata_type); 6378 else 6379 seq_printf(seq, " super non-persistent"); 6380 6381 if (mddev->pers) { 6382 mddev->pers->status(seq, mddev); 6383 seq_printf(seq, "\n "); 6384 if (mddev->pers->sync_request) { 6385 if (mddev->curr_resync > 2) { 6386 status_resync(seq, mddev); 6387 seq_printf(seq, "\n "); 6388 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 6389 seq_printf(seq, "\tresync=DELAYED\n "); 6390 else if (mddev->recovery_cp < MaxSector) 6391 seq_printf(seq, "\tresync=PENDING\n "); 6392 } 6393 } else 6394 seq_printf(seq, "\n "); 6395 6396 if ((bitmap = mddev->bitmap)) { 6397 unsigned long chunk_kb; 6398 unsigned long flags; 6399 spin_lock_irqsave(&bitmap->lock, flags); 6400 chunk_kb = mddev->bitmap_info.chunksize >> 10; 6401 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 6402 "%lu%s chunk", 6403 bitmap->pages - bitmap->missing_pages, 6404 bitmap->pages, 6405 (bitmap->pages - bitmap->missing_pages) 6406 << (PAGE_SHIFT - 10), 6407 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, 6408 chunk_kb ? 
"KB" : "B"); 6409 if (bitmap->file) { 6410 seq_printf(seq, ", file: "); 6411 seq_path(seq, &bitmap->file->f_path, " \t\n"); 6412 } 6413 6414 seq_printf(seq, "\n"); 6415 spin_unlock_irqrestore(&bitmap->lock, flags); 6416 } 6417 6418 seq_printf(seq, "\n"); 6419 } 6420 mddev_unlock(mddev); 6421 6422 return 0; 6423 } 6424 6425 static const struct seq_operations md_seq_ops = { 6426 .start = md_seq_start, 6427 .next = md_seq_next, 6428 .stop = md_seq_stop, 6429 .show = md_seq_show, 6430 }; 6431 6432 static int md_seq_open(struct inode *inode, struct file *file) 6433 { 6434 int error; 6435 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 6436 if (mi == NULL) 6437 return -ENOMEM; 6438 6439 error = seq_open(file, &md_seq_ops); 6440 if (error) 6441 kfree(mi); 6442 else { 6443 struct seq_file *p = file->private_data; 6444 p->private = mi; 6445 mi->event = atomic_read(&md_event_count); 6446 } 6447 return error; 6448 } 6449 6450 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6451 { 6452 struct seq_file *m = filp->private_data; 6453 struct mdstat_info *mi = m->private; 6454 int mask; 6455 6456 poll_wait(filp, &md_event_waiters, wait); 6457 6458 /* always allow read */ 6459 mask = POLLIN | POLLRDNORM; 6460 6461 if (mi->event != atomic_read(&md_event_count)) 6462 mask |= POLLERR | POLLPRI; 6463 return mask; 6464 } 6465 6466 static const struct file_operations md_seq_fops = { 6467 .owner = THIS_MODULE, 6468 .open = md_seq_open, 6469 .read = seq_read, 6470 .llseek = seq_lseek, 6471 .release = seq_release_private, 6472 .poll = mdstat_poll, 6473 }; 6474 6475 int register_md_personality(struct mdk_personality *p) 6476 { 6477 spin_lock(&pers_lock); 6478 list_add_tail(&p->list, &pers_list); 6479 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6480 spin_unlock(&pers_lock); 6481 return 0; 6482 } 6483 6484 int unregister_md_personality(struct mdk_personality *p) 6485 { 6486 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6487 spin_lock(&pers_lock); 6488 list_del_init(&p->list); 6489 spin_unlock(&pers_lock); 6490 return 0; 6491 } 6492 6493 static int is_mddev_idle(mddev_t *mddev, int init) 6494 { 6495 mdk_rdev_t * rdev; 6496 int idle; 6497 int curr_events; 6498 6499 idle = 1; 6500 rcu_read_lock(); 6501 rdev_for_each_rcu(rdev, mddev) { 6502 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6503 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6504 (int)part_stat_read(&disk->part0, sectors[1]) - 6505 atomic_read(&disk->sync_io); 6506 /* sync IO will cause sync_io to increase before the disk_stats 6507 * as sync_io is counted when a request starts, and 6508 * disk_stats is counted when it completes. 6509 * So resync activity will cause curr_events to be smaller than 6510 * when there was no such activity. 6511 * non-sync IO will cause disk_stat to increase without 6512 * increasing sync_io so curr_events will (eventually) 6513 * be larger than it was before. Once it becomes 6514 * substantially larger, the test below will cause 6515 * the array to appear non-idle, and resync will slow 6516 * down. 6517 * If there is a lot of outstanding resync activity when 6518 * we set last_event to curr_events, then all that activity 6519 * completing might cause the array to appear non-idle 6520 * and resync will be slowed down even though there might 6521 * not have been non-resync activity. This will only 6522 * happen once though. 
'last_events' will soon reflect 6523 * the state where there are few or no outstanding 6524 * resync requests, and further resync activity will 6525 * always make curr_events less than last_events. 6526 * 6527 */ 6528 if (init || curr_events - rdev->last_events > 64) { 6529 rdev->last_events = curr_events; 6530 idle = 0; 6531 } 6532 } 6533 rcu_read_unlock(); 6534 return idle; 6535 } 6536 6537 void md_done_sync(mddev_t *mddev, int blocks, int ok) 6538 { 6539 /* another "blocks" (512-byte) blocks have been synced */ 6540 atomic_sub(blocks, &mddev->recovery_active); 6541 wake_up(&mddev->recovery_wait); 6542 if (!ok) { 6543 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6544 md_wakeup_thread(mddev->thread); 6545 /* stop recovery, signal do_sync .... */ 6546 } 6547 } 6548 6549 6550 /* md_write_start(mddev, bi) 6551 * If we need to update some array metadata (e.g. 'active' flag 6552 * in superblock) before writing, schedule a superblock update 6553 * and wait for it to complete. 6554 */ 6555 void md_write_start(mddev_t *mddev, struct bio *bi) 6556 { 6557 int did_change = 0; 6558 if (bio_data_dir(bi) != WRITE) 6559 return; 6560 6561 BUG_ON(mddev->ro == 1); 6562 if (mddev->ro == 2) { 6563 /* need to switch to read/write */ 6564 mddev->ro = 0; 6565 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6566 md_wakeup_thread(mddev->thread); 6567 md_wakeup_thread(mddev->sync_thread); 6568 did_change = 1; 6569 } 6570 atomic_inc(&mddev->writes_pending); 6571 if (mddev->safemode == 1) 6572 mddev->safemode = 0; 6573 if (mddev->in_sync) { 6574 spin_lock_irq(&mddev->write_lock); 6575 if (mddev->in_sync) { 6576 mddev->in_sync = 0; 6577 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6578 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6579 md_wakeup_thread(mddev->thread); 6580 did_change = 1; 6581 } 6582 spin_unlock_irq(&mddev->write_lock); 6583 } 6584 if (did_change) 6585 sysfs_notify_dirent_safe(mddev->sysfs_state); 6586 wait_event(mddev->sb_wait, 6587 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6588 } 6589 6590 void md_write_end(mddev_t *mddev) 6591 { 6592 if (atomic_dec_and_test(&mddev->writes_pending)) { 6593 if (mddev->safemode == 2) 6594 md_wakeup_thread(mddev->thread); 6595 else if (mddev->safemode_delay) 6596 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 6597 } 6598 } 6599 6600 /* md_allow_write(mddev) 6601 * Calling this ensures that the array is marked 'active' so that writes 6602 * may proceed without blocking. It is important to call this before 6603 * attempting a GFP_KERNEL allocation while holding the mddev lock. 6604 * Must be called with mddev_lock held. 6605 * 6606 * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock 6607 * is dropped, so return -EAGAIN after notifying userspace.
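 *
 * A minimal usage sketch (illustrative only, not a real caller; 'ptr'
 * and 'size' are hypothetical names):
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;
 *	ptr = kmalloc(size, GFP_KERNEL);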
6608 */ 6609 int md_allow_write(mddev_t *mddev) 6610 { 6611 if (!mddev->pers) 6612 return 0; 6613 if (mddev->ro) 6614 return 0; 6615 if (!mddev->pers->sync_request) 6616 return 0; 6617 6618 spin_lock_irq(&mddev->write_lock); 6619 if (mddev->in_sync) { 6620 mddev->in_sync = 0; 6621 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6622 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6623 if (mddev->safemode_delay && 6624 mddev->safemode == 0) 6625 mddev->safemode = 1; 6626 spin_unlock_irq(&mddev->write_lock); 6627 md_update_sb(mddev, 0); 6628 sysfs_notify_dirent_safe(mddev->sysfs_state); 6629 } else 6630 spin_unlock_irq(&mddev->write_lock); 6631 6632 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 6633 return -EAGAIN; 6634 else 6635 return 0; 6636 } 6637 EXPORT_SYMBOL_GPL(md_allow_write); 6638 6639 void md_unplug(mddev_t *mddev) 6640 { 6641 if (mddev->queue) 6642 blk_unplug(mddev->queue); 6643 if (mddev->plug) 6644 mddev->plug->unplug_fn(mddev->plug); 6645 } 6646 6647 #define SYNC_MARKS 10 6648 #define SYNC_MARK_STEP (3*HZ) 6649 void md_do_sync(mddev_t *mddev) 6650 { 6651 mddev_t *mddev2; 6652 unsigned int currspeed = 0, 6653 window; 6654 sector_t max_sectors,j, io_sectors; 6655 unsigned long mark[SYNC_MARKS]; 6656 sector_t mark_cnt[SYNC_MARKS]; 6657 int last_mark,m; 6658 struct list_head *tmp; 6659 sector_t last_check; 6660 int skipped = 0; 6661 mdk_rdev_t *rdev; 6662 char *desc; 6663 6664 /* just in case thread restarts... */ 6665 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 6666 return; 6667 if (mddev->ro) /* never try to sync a read-only array */ 6668 return; 6669 6670 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6671 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 6672 desc = "data-check"; 6673 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6674 desc = "requested-resync"; 6675 else 6676 desc = "resync"; 6677 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6678 desc = "reshape"; 6679 else 6680 desc = "recovery"; 6681 6682 /* we overload curr_resync somewhat here. 6683 * 0 == not engaged in resync at all 6684 * 2 == checking that there is no conflict with another sync 6685 * 1 == like 2, but have yielded to allow conflicting resync to 6686 * commence 6687 * other == active in resync - this many blocks 6688 * 6689 * Before starting a resync we must have set curr_resync to 6690 * 2, and then checked that every "conflicting" array has curr_resync 6691 * less than ours. When we find one that is the same or higher 6692 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 6693 * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure). 6694 * This will mean we have to start checking from the beginning again.
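 * (Example: if two arrays sharing a disk both reach curr_resync == 2,
 * the lower-addressed mddev drops to 1, wakes resync_wait and waits,
 * while the higher-addressed one keeps 2 and proceeds; the yielder
 * restarts its conflict check from "try_again" when woken.)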
6695 * 6696 */ 6697 6698 do { 6699 mddev->curr_resync = 2; 6700 6701 try_again: 6702 if (kthread_should_stop()) 6703 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6704 6705 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6706 goto skip; 6707 for_each_mddev(mddev2, tmp) { 6708 if (mddev2 == mddev) 6709 continue; 6710 if (!mddev->parallel_resync 6711 && mddev2->curr_resync 6712 && match_mddev_units(mddev, mddev2)) { 6713 DEFINE_WAIT(wq); 6714 if (mddev < mddev2 && mddev->curr_resync == 2) { 6715 /* arbitrarily yield */ 6716 mddev->curr_resync = 1; 6717 wake_up(&resync_wait); 6718 } 6719 if (mddev > mddev2 && mddev->curr_resync == 1) 6720 /* no need to wait here, we can wait the next 6721 * time 'round when curr_resync == 2 6722 */ 6723 continue; 6724 /* We need to wait 'interruptible' so as not to 6725 * contribute to the load average, and not to 6726 * be caught by 'softlockup' 6727 */ 6728 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 6729 if (!kthread_should_stop() && 6730 mddev2->curr_resync >= mddev->curr_resync) { 6731 printk(KERN_INFO "md: delaying %s of %s" 6732 " until %s has finished (they" 6733 " share one or more physical units)\n", 6734 desc, mdname(mddev), mdname(mddev2)); 6735 mddev_put(mddev2); 6736 if (signal_pending(current)) 6737 flush_signals(current); 6738 schedule(); 6739 finish_wait(&resync_wait, &wq); 6740 goto try_again; 6741 } 6742 finish_wait(&resync_wait, &wq); 6743 } 6744 } 6745 } while (mddev->curr_resync < 2); 6746 6747 j = 0; 6748 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6749 /* resync follows the size requested by the personality, 6750 * which defaults to physical size, but can be virtual size 6751 */ 6752 max_sectors = mddev->resync_max_sectors; 6753 mddev->resync_mismatches = 0; 6754 /* we don't use the checkpoint if there's a bitmap */ 6755 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6756 j = mddev->resync_min; 6757 else if (!mddev->bitmap) 6758 j = mddev->recovery_cp; 6759 6760 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6761 max_sectors = mddev->dev_sectors; 6762 else { 6763 /* recovery follows the physical size of devices */ 6764 max_sectors = mddev->dev_sectors; 6765 j = MaxSector; 6766 rcu_read_lock(); 6767 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 6768 if (rdev->raid_disk >= 0 && 6769 !test_bit(Faulty, &rdev->flags) && 6770 !test_bit(In_sync, &rdev->flags) && 6771 rdev->recovery_offset < j) 6772 j = rdev->recovery_offset; 6773 rcu_read_unlock(); 6774 } 6775 6776 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 6777 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 6778 " %d KB/sec/disk.\n", speed_min(mddev)); 6779 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 6780 "(but not more than %d KB/sec) for %s.\n", 6781 speed_max(mddev), desc); 6782 6783 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 6784 6785 io_sectors = 0; 6786 for (m = 0; m < SYNC_MARKS; m++) { 6787 mark[m] = jiffies; 6788 mark_cnt[m] = io_sectors; 6789 } 6790 last_mark = 0; 6791 mddev->resync_mark = mark[last_mark]; 6792 mddev->resync_mark_cnt = mark_cnt[last_mark]; 6793 6794 /* 6795 * Tune reconstruction: 6796 */ 6797 window = 32*(PAGE_SIZE/512); 6798 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 6799 window/2,(unsigned long long) max_sectors/2); 6800 6801 atomic_set(&mddev->recovery_active, 0); 6802 last_check = 0; 6803 6804 if (j>2) { 6805 printk(KERN_INFO 6806 "md: resuming %s of %s from checkpoint.\n", 6807 desc, mdname(mddev)); 6808 
mddev->curr_resync = j; 6809 } 6810 mddev->curr_resync_completed = mddev->curr_resync; 6811 6812 while (j < max_sectors) { 6813 sector_t sectors; 6814 6815 skipped = 0; 6816 6817 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 6818 ((mddev->curr_resync > mddev->curr_resync_completed && 6819 (mddev->curr_resync - mddev->curr_resync_completed) 6820 > (max_sectors >> 4)) || 6821 (j - mddev->curr_resync_completed)*2 6822 >= mddev->resync_max - mddev->curr_resync_completed 6823 )) { 6824 /* time to update curr_resync_completed */ 6825 md_unplug(mddev); 6826 wait_event(mddev->recovery_wait, 6827 atomic_read(&mddev->recovery_active) == 0); 6828 mddev->curr_resync_completed = 6829 mddev->curr_resync; 6830 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6831 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6832 } 6833 6834 while (j >= mddev->resync_max && !kthread_should_stop()) { 6835 /* As this condition is controlled by user-space, 6836 * we can block indefinitely, so use '_interruptible' 6837 * to avoid triggering warnings. 6838 */ 6839 flush_signals(current); /* just in case */ 6840 wait_event_interruptible(mddev->recovery_wait, 6841 mddev->resync_max > j 6842 || kthread_should_stop()); 6843 } 6844 6845 if (kthread_should_stop()) 6846 goto interrupted; 6847 6848 sectors = mddev->pers->sync_request(mddev, j, &skipped, 6849 currspeed < speed_min(mddev)); 6850 if (sectors == 0) { 6851 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6852 goto out; 6853 } 6854 6855 if (!skipped) { /* actual IO requested */ 6856 io_sectors += sectors; 6857 atomic_add(sectors, &mddev->recovery_active); 6858 } 6859 6860 j += sectors; 6861 if (j>1) mddev->curr_resync = j; 6862 mddev->curr_mark_cnt = io_sectors; 6863 if (last_check == 0) 6864 /* this is the earliest that rebuild will be 6865 * visible in /proc/mdstat 6866 */ 6867 md_new_event(mddev); 6868 6869 if (last_check + window > io_sectors || j == max_sectors) 6870 continue; 6871 6872 last_check = io_sectors; 6873 6874 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6875 break; 6876 6877 repeat: 6878 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 6879 /* step marks */ 6880 int next = (last_mark+1) % SYNC_MARKS; 6881 6882 mddev->resync_mark = mark[next]; 6883 mddev->resync_mark_cnt = mark_cnt[next]; 6884 mark[next] = jiffies; 6885 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 6886 last_mark = next; 6887 } 6888 6889 6890 if (kthread_should_stop()) 6891 goto interrupted; 6892 6893 6894 /* 6895 * this loop exits only if we are slower than 6896 * the 'hard' speed limit, or the system was IO-idle for 6897 * a jiffy. 6898 * the system might be non-idle CPU-wise, but we only care 6899 * about not overloading the IO subsystem.
(things like an 6900 * e2fsck being done on the RAID array should execute fast) 6901 */ 6902 md_unplug(mddev); 6903 cond_resched(); 6904 6905 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 6906 /((jiffies-mddev->resync_mark)/HZ +1) +1; 6907 6908 if (currspeed > speed_min(mddev)) { 6909 if ((currspeed > speed_max(mddev)) || 6910 !is_mddev_idle(mddev, 0)) { 6911 msleep(500); 6912 goto repeat; 6913 } 6914 } 6915 } 6916 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); 6917 /* 6918 * this also signals 'finished resyncing' to md_stop 6919 */ 6920 out: 6921 md_unplug(mddev); 6922 6923 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 6924 6925 /* tell personality that we are finished */ 6926 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 6927 6928 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 6929 mddev->curr_resync > 2) { 6930 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6931 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 6932 if (mddev->curr_resync >= mddev->recovery_cp) { 6933 printk(KERN_INFO 6934 "md: checkpointing %s of %s.\n", 6935 desc, mdname(mddev)); 6936 mddev->recovery_cp = mddev->curr_resync; 6937 } 6938 } else 6939 mddev->recovery_cp = MaxSector; 6940 } else { 6941 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6942 mddev->curr_resync = MaxSector; 6943 rcu_read_lock(); 6944 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 6945 if (rdev->raid_disk >= 0 && 6946 mddev->delta_disks >= 0 && 6947 !test_bit(Faulty, &rdev->flags) && 6948 !test_bit(In_sync, &rdev->flags) && 6949 rdev->recovery_offset < mddev->curr_resync) 6950 rdev->recovery_offset = mddev->curr_resync; 6951 rcu_read_unlock(); 6952 } 6953 } 6954 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6955 6956 skip: 6957 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 6958 /* We completed so min/max setting can be forgotten if used. */ 6959 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6960 mddev->resync_min = 0; 6961 mddev->resync_max = MaxSector; 6962 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6963 mddev->resync_min = mddev->curr_resync_completed; 6964 mddev->curr_resync = 0; 6965 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6966 mddev->curr_resync_completed = 0; 6967 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6968 wake_up(&resync_wait); 6969 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 6970 md_wakeup_thread(mddev->thread); 6971 return; 6972 6973 interrupted: 6974 /* 6975 * got a signal, exit. 6976 */ 6977 printk(KERN_INFO 6978 "md: md_do_sync() got signal ... exiting\n"); 6979 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6980 goto out; 6981 6982 } 6983 EXPORT_SYMBOL_GPL(md_do_sync); 6984 6985 6986 static int remove_and_add_spares(mddev_t *mddev) 6987 { 6988 mdk_rdev_t *rdev; 6989 int spares = 0; 6990 6991 mddev->curr_resync_completed = 0; 6992 6993 list_for_each_entry(rdev, &mddev->disks, same_set) 6994 if (rdev->raid_disk >= 0 && 6995 !test_bit(Blocked, &rdev->flags) && 6996 (test_bit(Faulty, &rdev->flags) || 6997 ! test_bit(In_sync, &rdev->flags)) && 6998 atomic_read(&rdev->nr_pending)==0) { 6999 if (mddev->pers->hot_remove_disk( 7000 mddev, rdev->raid_disk)==0) { 7001 char nm[20]; 7002 sprintf(nm,"rd%d", rdev->raid_disk); 7003 sysfs_remove_link(&mddev->kobj, nm); 7004 rdev->raid_disk = -1; 7005 } 7006 } 7007 7008 if (mddev->degraded && ! 
mddev->ro && !mddev->recovery_disabled) { 7009 list_for_each_entry(rdev, &mddev->disks, same_set) { 7010 if (rdev->raid_disk >= 0 && 7011 !test_bit(In_sync, &rdev->flags) && 7012 !test_bit(Blocked, &rdev->flags)) 7013 spares++; 7014 if (rdev->raid_disk < 0 7015 && !test_bit(Faulty, &rdev->flags)) { 7016 rdev->recovery_offset = 0; 7017 if (mddev->pers-> 7018 hot_add_disk(mddev, rdev) == 0) { 7019 char nm[20]; 7020 sprintf(nm, "rd%d", rdev->raid_disk); 7021 if (sysfs_create_link(&mddev->kobj, 7022 &rdev->kobj, nm)) 7023 /* failure here is OK */; 7024 spares++; 7025 md_new_event(mddev); 7026 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7027 } else 7028 break; 7029 } 7030 } 7031 } 7032 return spares; 7033 } 7034 /* 7035 * This routine is regularly called by all per-raid-array threads to 7036 * deal with generic issues like resync and super-block update. 7037 * Raid personalities that don't have a thread (linear/raid0) do not 7038 * need this as they never do any recovery or update the superblock. 7039 * 7040 * It does not do any resync itself, but rather "forks" off other threads 7041 * to do that as needed. 7042 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 7043 * "->recovery" and create a thread at ->sync_thread. 7044 * When the thread finishes it sets MD_RECOVERY_DONE 7045 * and wakes up this thread which will reap the thread and finish up. 7046 * This thread also removes any faulty devices (with nr_pending == 0). 7047 * 7048 * The overall approach is: 7049 * 1/ if the superblock needs updating, update it. 7050 * 2/ If a recovery thread is running, don't do anything else. 7051 * 3/ If recovery has finished, clean up, possibly marking spares active. 7052 * 4/ If there are any faulty devices, remove them. 7053 * 5/ If array is degraded, try to add spare devices 7054 * 6/ If array has spares or is not in-sync, start a resync thread. 7055 */ 7056 void md_check_recovery(mddev_t *mddev) 7057 { 7058 mdk_rdev_t *rdev; 7059 7060 7061 if (mddev->bitmap) 7062 bitmap_daemon_work(mddev); 7063 7064 if (mddev->ro) 7065 return; 7066 7067 if (signal_pending(current)) { 7068 if (mddev->pers->sync_request && !mddev->external) { 7069 printk(KERN_INFO "md: %s in immediate safe mode\n", 7070 mdname(mddev)); 7071 mddev->safemode = 2; 7072 } 7073 flush_signals(current); 7074 } 7075 7076 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7077 return; 7078 if ( ! ( 7079 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || 7080 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7081 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7082 (mddev->external == 0 && mddev->safemode == 1) || 7083 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) 7084 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 7085 )) 7086 return; 7087 7088 if (mddev_trylock(mddev)) { 7089 int spares = 0; 7090 7091 if (mddev->ro) { 7092 /* Only thing we do on a ro array is remove 7093 * failed devices.
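 * (Spares are never added here: the hot-add branch in
 * remove_and_add_spares() is guarded by !mddev->ro, so a read-only
 * array only has its failed devices removed.)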
7094 */ 7095 remove_and_add_spares(mddev); 7096 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7097 goto unlock; 7098 } 7099 7100 if (!mddev->external) { 7101 int did_change = 0; 7102 spin_lock_irq(&mddev->write_lock); 7103 if (mddev->safemode && 7104 !atomic_read(&mddev->writes_pending) && 7105 !mddev->in_sync && 7106 mddev->recovery_cp == MaxSector) { 7107 mddev->in_sync = 1; 7108 did_change = 1; 7109 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7110 } 7111 if (mddev->safemode == 1) 7112 mddev->safemode = 0; 7113 spin_unlock_irq(&mddev->write_lock); 7114 if (did_change) 7115 sysfs_notify_dirent_safe(mddev->sysfs_state); 7116 } 7117 7118 if (mddev->flags) 7119 md_update_sb(mddev, 0); 7120 7121 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 7122 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 7123 /* resync/recovery still happening */ 7124 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7125 goto unlock; 7126 } 7127 if (mddev->sync_thread) { 7128 /* resync has finished, collect result */ 7129 md_unregister_thread(mddev->sync_thread); 7130 mddev->sync_thread = NULL; 7131 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7132 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7133 /* success...*/ 7134 /* activate any spares */ 7135 if (mddev->pers->spare_active(mddev)) 7136 sysfs_notify(&mddev->kobj, NULL, 7137 "degraded"); 7138 } 7139 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7140 mddev->pers->finish_reshape) 7141 mddev->pers->finish_reshape(mddev); 7142 md_update_sb(mddev, 1); 7143 7144 /* if array is no longer degraded, then any saved_raid_disk 7145 * information must be scrapped 7146 */ 7147 if (!mddev->degraded) 7148 list_for_each_entry(rdev, &mddev->disks, same_set) 7149 rdev->saved_raid_disk = -1; 7150 7151 mddev->recovery = 0; 7152 /* flag recovery needed just to double check */ 7153 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7154 sysfs_notify_dirent_safe(mddev->sysfs_action); 7155 md_new_event(mddev); 7156 goto unlock; 7157 } 7158 /* Set RUNNING before clearing NEEDED to avoid 7159 * any transients in the value of "sync_action". 7160 */ 7161 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7162 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7163 /* Clear some bits that don't mean anything, but 7164 * might be left set 7165 */ 7166 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 7167 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7168 7169 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 7170 goto unlock; 7171 /* no recovery is running. 7172 * remove any failed drives, then 7173 * add spares if possible. 7174 * Spares are also removed and re-added, to allow 7175 * the personality to fail the re-add. 7176 */ 7177 7178 if (mddev->reshape_position != MaxSector) { 7179 if (mddev->pers->check_reshape == NULL || 7180 mddev->pers->check_reshape(mddev) != 0) 7181 /* Cannot proceed */ 7182 goto unlock; 7183 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7184 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7185 } else if ((spares = remove_and_add_spares(mddev))) { 7186 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7187 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7188 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7189 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7190 } else if (mddev->recovery_cp < MaxSector) { 7191 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7192 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7193 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 7194 /* nothing to be done ...
*/ 7195 goto unlock; 7196 7197 if (mddev->pers->sync_request) { 7198 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 7199 /* We are adding a device or devices to an array 7200 * which has the bitmap stored on all devices. 7201 * So make sure all bitmap pages get written 7202 */ 7203 bitmap_write_all(mddev->bitmap); 7204 } 7205 mddev->sync_thread = md_register_thread(md_do_sync, 7206 mddev, 7207 "resync"); 7208 if (!mddev->sync_thread) { 7209 printk(KERN_ERR "%s: could not start resync" 7210 " thread...\n", 7211 mdname(mddev)); 7212 /* leave the spares where they are, it shouldn't hurt */ 7213 mddev->recovery = 0; 7214 } else 7215 md_wakeup_thread(mddev->sync_thread); 7216 sysfs_notify_dirent_safe(mddev->sysfs_action); 7217 md_new_event(mddev); 7218 } 7219 unlock: 7220 if (!mddev->sync_thread) { 7221 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7222 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 7223 &mddev->recovery)) 7224 if (mddev->sysfs_action) 7225 sysfs_notify_dirent_safe(mddev->sysfs_action); 7226 } 7227 mddev_unlock(mddev); 7228 } 7229 } 7230 7231 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 7232 { 7233 sysfs_notify_dirent_safe(rdev->sysfs_state); 7234 wait_event_timeout(rdev->blocked_wait, 7235 !test_bit(Blocked, &rdev->flags), 7236 msecs_to_jiffies(5000)); 7237 rdev_dec_pending(rdev, mddev); 7238 } 7239 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 7240 7241 static int md_notify_reboot(struct notifier_block *this, 7242 unsigned long code, void *x) 7243 { 7244 struct list_head *tmp; 7245 mddev_t *mddev; 7246 7247 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { 7248 7249 printk(KERN_INFO "md: stopping all md devices.\n"); 7250 7251 for_each_mddev(mddev, tmp) 7252 if (mddev_trylock(mddev)) { 7253 /* Force a switch to readonly even if the array 7254 * appears to still be in use. Hence 7255 * the '100'. 7256 */ 7257 md_set_readonly(mddev, 100); 7258 mddev_unlock(mddev); 7259 } 7260 /* 7261 * certain more exotic SCSI devices are known to be 7262 * volatile wrt too early system reboots. While the 7263 * right place to handle this issue is the given 7264 * driver, we do want to have a safe RAID driver ...
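 * The mdelay(1000*1) below therefore gives those devices a full
 * second to settle after the arrays have been switched read-only.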
7265 */ 7266 mdelay(1000*1); 7267 } 7268 return NOTIFY_DONE; 7269 } 7270 7271 static struct notifier_block md_notifier = { 7272 .notifier_call = md_notify_reboot, 7273 .next = NULL, 7274 .priority = INT_MAX, /* before any real devices */ 7275 }; 7276 7277 static void md_geninit(void) 7278 { 7279 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 7280 7281 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); 7282 } 7283 7284 static int __init md_init(void) 7285 { 7286 int ret = -ENOMEM; 7287 7288 md_wq = alloc_workqueue("md", WQ_RESCUER, 0); 7289 if (!md_wq) 7290 goto err_wq; 7291 7292 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 7293 if (!md_misc_wq) 7294 goto err_misc_wq; 7295 7296 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0) 7297 goto err_md; 7298 7299 if ((ret = register_blkdev(0, "mdp")) < 0) 7300 goto err_mdp; 7301 mdp_major = ret; 7302 7303 blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE, 7304 md_probe, NULL, NULL); 7305 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, 7306 md_probe, NULL, NULL); 7307 7308 register_reboot_notifier(&md_notifier); 7309 raid_table_header = register_sysctl_table(raid_root_table); 7310 7311 md_geninit(); 7312 return 0; 7313 7314 err_mdp: 7315 unregister_blkdev(MD_MAJOR, "md"); 7316 err_md: 7317 destroy_workqueue(md_misc_wq); 7318 err_misc_wq: 7319 destroy_workqueue(md_wq); 7320 err_wq: 7321 return ret; 7322 } 7323 7324 #ifndef MODULE 7325 7326 /* 7327 * Searches all registered partitions for autorun RAID arrays 7328 * at boot time. 7329 */ 7330 7331 static LIST_HEAD(all_detected_devices); 7332 struct detected_devices_node { 7333 struct list_head list; 7334 dev_t dev; 7335 }; 7336 7337 void md_autodetect_dev(dev_t dev) 7338 { 7339 struct detected_devices_node *node_detected_dev; 7340 7341 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 7342 if (node_detected_dev) { 7343 node_detected_dev->dev = dev; 7344 list_add_tail(&node_detected_dev->list, &all_detected_devices); 7345 } else { 7346 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" 7347 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); 7348 } 7349 } 7350 7351 7352 static void autostart_arrays(int part) 7353 { 7354 mdk_rdev_t *rdev; 7355 struct detected_devices_node *node_detected_dev; 7356 dev_t dev; 7357 int i_scanned, i_passed; 7358 7359 i_scanned = 0; 7360 i_passed = 0; 7361 7362 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 7363 7364 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 7365 i_scanned++; 7366 node_detected_dev = list_entry(all_detected_devices.next, 7367 struct detected_devices_node, list); 7368 list_del(&node_detected_dev->list); 7369 dev = node_detected_dev->dev; 7370 kfree(node_detected_dev); 7371 rdev = md_import_device(dev,0, 90); 7372 if (IS_ERR(rdev)) 7373 continue; 7374 7375 if (test_bit(Faulty, &rdev->flags)) { 7376 MD_BUG(); 7377 continue; 7378 } 7379 set_bit(AutoDetected, &rdev->flags); 7380 list_add(&rdev->same_set, &pending_raid_disks); 7381 i_passed++; 7382 } 7383 7384 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", 7385 i_scanned, i_passed); 7386 7387 autorun_devices(part); 7388 } 7389 7390 #endif /* !MODULE */ 7391 7392 static __exit void md_exit(void) 7393 { 7394 mddev_t *mddev; 7395 struct list_head *tmp; 7396 7397 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); 7398 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 7399 7400 unregister_blkdev(MD_MAJOR,"md"); 7401 unregister_blkdev(mdp_major, "mdp"); 7402 
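	/* tear down the remaining md_init()/md_geninit() registrations */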
unregister_reboot_notifier(&md_notifier); 7403 unregister_sysctl_table(raid_table_header); 7404 remove_proc_entry("mdstat", NULL); 7405 for_each_mddev(mddev, tmp) { 7406 export_array(mddev); 7407 mddev->hold_active = 0; 7408 } 7409 destroy_workqueue(md_misc_wq); 7410 destroy_workqueue(md_wq); 7411 } 7412 7413 subsys_initcall(md_init); 7414 module_exit(md_exit) 7415 7416 static int get_ro(char *buffer, struct kernel_param *kp) 7417 { 7418 return sprintf(buffer, "%d", start_readonly); 7419 } 7420 static int set_ro(const char *val, struct kernel_param *kp) 7421 { 7422 char *e; 7423 int num = simple_strtoul(val, &e, 10); 7424 if (*val && (*e == '\0' || *e == '\n')) { 7425 start_readonly = num; 7426 return 0; 7427 } 7428 return -EINVAL; 7429 } 7430 7431 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 7432 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 7433 7434 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 7435 7436 EXPORT_SYMBOL(register_md_personality); 7437 EXPORT_SYMBOL(unregister_md_personality); 7438 EXPORT_SYMBOL(md_error); 7439 EXPORT_SYMBOL(md_done_sync); 7440 EXPORT_SYMBOL(md_write_start); 7441 EXPORT_SYMBOL(md_write_end); 7442 EXPORT_SYMBOL(md_register_thread); 7443 EXPORT_SYMBOL(md_unregister_thread); 7444 EXPORT_SYMBOL(md_wakeup_thread); 7445 EXPORT_SYMBOL(md_check_recovery); 7446 MODULE_LICENSE("GPL"); 7447 MODULE_DESCRIPTION("MD RAID framework"); 7448 MODULE_ALIAS("md"); 7449 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 7450
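/*
 * Illustrative sketch (not part of this file): a RAID personality
 * typically drives the interfaces above roughly like this; the name
 * 'myraid_daemon' is hypothetical:
 *
 *	static void myraid_daemon(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		... process queued I/O, calling md_write_start()/
 *		    md_write_end() around array writes and md_error()
 *		    on a failing member device ...
 *	}
 *
 *	mddev->thread = md_register_thread(myraid_daemon, mddev, NULL);
 *	md_wakeup_thread(mddev->thread);
 *	...
 *	md_unregister_thread(mddev->thread);
 *
 * md_thread() invokes the run() callback each time THREAD_WAKEUP is set
 * or the thread's timeout expires, so the daemon must tolerate spurious
 * calls.
 */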