/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))

#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
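
/*
 * Illustrative note (added; not in the original driver text): the per-array
 * sysfs knobs override these global defaults only when non-zero, as the
 * speed_min()/speed_max() helpers below show.  For example, assuming an
 * array md0 whose limits were never set:
 *
 *	# cat /sys/block/md0/md/sync_speed_min
 *	1000 (system)
 *	# echo 50000 > /sys/block/md0/md/sync_speed_min	  # per-array now
 *
 * The "(system)" suffix is how md's sysfs code reports that the global
 * sysctl value is in effect; treat the exact output as an assumption.
 */
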
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

static void mddev_bio_destructor(struct bio *bio)
{
	mddev_t *mddev, **mddevp;

	mddevp = (void*)bio;
	mddev = mddevp[-1];

	bio_free(bio, mddev->bio_set);
}

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	__bio_clone(b, bio);
	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
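
/*
 * Illustrative sketch (added; not part of the driver): a userspace monitor
 * can block until md_event_count changes by polling /proc/mdstat.  Assuming
 * ordinary poll(2) semantics as implemented by mdstat's poll method:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *	for (;;) {
 *		poll(&pfd, 1, -1);		// wakes on md_new_event()
 *		lseek(fd, 0, SEEK_SET);		// re-read the new state
 *		...
 *	}
 *
 * mdadm --monitor uses essentially this pattern.
 */
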
/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)


/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	int rv;
	int cpu;
	unsigned int sectors;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return 0;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
EXPORT_SYMBOL_GPL(mddev_resume);

int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	mdk_rdev_t *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
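
/*
 * Illustrative sketch (added; an assumption about callers, though raid0
 * follows this shape): a personality's make_request method diverts flush
 * requests here before doing its own mapping:
 *
 *	static int demo_make_request(mddev_t *mddev, struct bio *bio)
 *	{
 *		if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 *			md_flush_request(mddev, bio);
 *			return 0;	// md resubmits it, minus REQ_FLUSH
 *		}
 *		... normal mapping ...
 *	}
 *
 * md_submit_flush_data() later feeds the bio back into ->make_request with
 * REQ_FLUSH cleared, once every component device has been flushed.
 */
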
/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue or request structures.
 * We allocate an md_plug_cb for each md device and each thread it gets
 * plugged on.  This links to the private plug_handle structure in the
 * personality data where we keep a count of the number of outstanding
 * plugs so other code can see if a plug is active.
 */
struct md_plug_cb {
	struct blk_plug_cb cb;
	mddev_t *mddev;
};

static void plugger_unplug(struct blk_plug_cb *cb)
{
	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
		md_wakeup_thread(mdcb->mddev->thread);
	kfree(mdcb);
}

/* Check that an unplug wakeup will come shortly.
 * If not, wakeup the md thread immediately
 */
int mddev_check_plugged(mddev_t *mddev)
{
	struct blk_plug *plug = current->plug;
	struct md_plug_cb *mdcb;

	if (!plug)
		return 0;

	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
		if (mdcb->cb.callback == plugger_unplug &&
		    mdcb->mddev == mddev) {
			/* Already on the list, move to top */
			if (mdcb != list_first_entry(&plug->cb_list,
						     struct md_plug_cb,
						     cb.list))
				list_move(&mdcb->cb.list, &plug->cb_list);
			return 1;
		}
	}
	/* Not currently on the callback list */
	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
	if (!mdcb)
		return 0;

	mdcb->mddev = mddev;
	mdcb->cb.callback = plugger_unplug;
	atomic_inc(&mddev->plug_cnt);
	list_add(&mdcb->cb.list, &plug->cb_list);
	return 1;
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);
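
/*
 * Illustrative sketch (added; an assumption about how personalities use
 * this, in the style of raid1/raid5): defer a wakeup while the submitting
 * task is plugged, otherwise wake the md thread at once:
 *
 *	if (!mddev_check_plugged(mddev))
 *		md_wakeup_thread(mddev->thread);	// no unplug coming
 *
 * When mddev_check_plugged() returns 1, plugger_unplug() above performs
 * the wakeup when the task's blk_plug is flushed.
 */
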
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(mddev_t *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	atomic_set(&mddev->plug_cnt, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;
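
/*
 * Illustrative sketch (added; demo_store is hypothetical): reconfiguration
 * paths bracket their work with mddev_lock()/mddev_unlock(), e.g. a
 * sysfs store method:
 *
 *	static ssize_t demo_store(mddev_t *mddev, const char *buf, size_t len)
 *	{
 *		if (mddev_lock(mddev))
 *			return -EINTR;	// interrupted while waiting
 *		... modify mddev under reconfig_mutex ...
 *		mddev_unlock(mddev);
 *		return len;
 *	}
 *
 * mddev_unlock() below also handles deferred sysfs group removal and
 * wakes the per-array thread.
 */
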
static void mddev_unlock(mddev_t * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}


static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;
	int ret;

	rw |= REQ_SYNC;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
	else
		bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
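
/*
 * Worked example (added for illustration): folding 0x12345678 gives
 * (0x5678 + 0x1234) = 0x68ac on the first pass, and the second pass
 * leaves it unchanged since there is no carry above bit 15.  A value
 * like 0xffff0001 shows why two passes are needed: the first pass
 * yields 0x10000, and the second folds the carry back in, giving 0x0001.
 */
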
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
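
/*
 * Illustrative sketch (added; demo_run is hypothetical, though raid0
 * follows this pattern): a bitmap-less personality rejects arrays with
 * a bitmap at start-up:
 *
 *	static int demo_run(mddev_t *mddev)
 *	{
 *		if (md_check_no_bitmap(mddev))
 *			return -EINVAL;
 *		... rest of personality start-up ...
 *	}
 */
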
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
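
	/*
	 * Worked example (added for illustration, not in the original):
	 * for minor_version 0 on a device of 976773168 sectors, the code
	 * below computes 976773168 - 16 = 976773152 and then rounds down
	 * to a multiple of 8 sectors (4K); 976773152 is already aligned,
	 * so the superblock sits exactly 8K before the end of the device.
	 */
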
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
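
/*
 * Illustrative note (added): super_types is indexed by the metadata major
 * version, so callers dispatch through it rather than branching on the
 * format, e.g. (this shape matches the call visible in sync_sbs() at the
 * truncated end of this file):
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 */
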
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
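
/*
 * Illustrative sketch (added; demo_run/demo_add_disk are hypothetical,
 * though raid1-style personalities follow this pattern):
 * md_integrity_register() runs once at start-up, while hot-add paths
 * re-check each new member:
 *
 *	static int demo_run(mddev_t *mddev)
 *	{
 *		...
 *		return md_integrity_register(mddev);
 *	}
 *
 *	static int demo_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 *	{
 *		...
 *		md_integrity_add_rdev(rdev, mddev);
 *		return 0;
 *	}
 */
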
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}
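
/*
 * Illustrative timeline (added for clarity; the sysfs path name is an
 * assumption) of why the kobject deletion above is deferred to md_misc_wq:
 *
 *	1. user writes "remove" to /sys/block/mdX/md/dev-XXX/state
 *	2. the sysfs store method ends up in unbind_rdev_from_array()
 *	3. deleting rdev->kobj synchronously here would wait for the very
 *	   sysfs file the writer is still executing in - a deadlock
 *	4. md_delayed_delete() drops the kobject from a workqueue instead
 *
 * synchronize_rcu() additionally ensures no reader is still traversing
 * mddev->disks with a pointer to this rdev before it can be freed.
 */
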
(mdk_rdev_t *)lock_rdev : rdev); 1964 if (IS_ERR(bdev)) { 1965 printk(KERN_ERR "md: could not open %s.\n", 1966 __bdevname(dev, b)); 1967 return PTR_ERR(bdev); 1968 } 1969 rdev->bdev = bdev; 1970 return err; 1971 } 1972 1973 static void unlock_rdev(mdk_rdev_t *rdev) 1974 { 1975 struct block_device *bdev = rdev->bdev; 1976 rdev->bdev = NULL; 1977 if (!bdev) 1978 MD_BUG(); 1979 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1980 } 1981 1982 void md_autodetect_dev(dev_t dev); 1983 1984 static void export_rdev(mdk_rdev_t * rdev) 1985 { 1986 char b[BDEVNAME_SIZE]; 1987 printk(KERN_INFO "md: export_rdev(%s)\n", 1988 bdevname(rdev->bdev,b)); 1989 if (rdev->mddev) 1990 MD_BUG(); 1991 free_disk_sb(rdev); 1992 #ifndef MODULE 1993 if (test_bit(AutoDetected, &rdev->flags)) 1994 md_autodetect_dev(rdev->bdev->bd_dev); 1995 #endif 1996 unlock_rdev(rdev); 1997 kobject_put(&rdev->kobj); 1998 } 1999 2000 static void kick_rdev_from_array(mdk_rdev_t * rdev) 2001 { 2002 unbind_rdev_from_array(rdev); 2003 export_rdev(rdev); 2004 } 2005 2006 static void export_array(mddev_t *mddev) 2007 { 2008 mdk_rdev_t *rdev, *tmp; 2009 2010 rdev_for_each(rdev, tmp, mddev) { 2011 if (!rdev->mddev) { 2012 MD_BUG(); 2013 continue; 2014 } 2015 kick_rdev_from_array(rdev); 2016 } 2017 if (!list_empty(&mddev->disks)) 2018 MD_BUG(); 2019 mddev->raid_disks = 0; 2020 mddev->major_version = 0; 2021 } 2022 2023 static void print_desc(mdp_disk_t *desc) 2024 { 2025 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 2026 desc->major,desc->minor,desc->raid_disk,desc->state); 2027 } 2028 2029 static void print_sb_90(mdp_super_t *sb) 2030 { 2031 int i; 2032 2033 printk(KERN_INFO 2034 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 2035 sb->major_version, sb->minor_version, sb->patch_version, 2036 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 2037 sb->ctime); 2038 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 2039 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 2040 sb->md_minor, sb->layout, sb->chunk_size); 2041 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 2042 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 2043 sb->utime, sb->state, sb->active_disks, sb->working_disks, 2044 sb->failed_disks, sb->spare_disks, 2045 sb->sb_csum, (unsigned long)sb->events_lo); 2046 2047 printk(KERN_INFO); 2048 for (i = 0; i < MD_SB_DISKS; i++) { 2049 mdp_disk_t *desc; 2050 2051 desc = sb->disks + i; 2052 if (desc->number || desc->major || desc->minor || 2053 desc->raid_disk || (desc->state && (desc->state != 4))) { 2054 printk(" D %2d: ", i); 2055 print_desc(desc); 2056 } 2057 } 2058 printk(KERN_INFO "md: THIS: "); 2059 print_desc(&sb->this_disk); 2060 } 2061 2062 static void print_sb_1(struct mdp_superblock_1 *sb) 2063 { 2064 __u8 *uuid; 2065 2066 uuid = sb->set_uuid; 2067 printk(KERN_INFO 2068 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n" 2069 "md: Name: \"%s\" CT:%llu\n", 2070 le32_to_cpu(sb->major_version), 2071 le32_to_cpu(sb->feature_map), 2072 uuid, 2073 sb->set_name, 2074 (unsigned long long)le64_to_cpu(sb->ctime) 2075 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 2076 2077 uuid = sb->device_uuid; 2078 printk(KERN_INFO 2079 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 2080 " RO:%llu\n" 2081 "md: Dev:%08x UUID: %pU\n" 2082 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 2083 "md: (MaxDev:%u) \n", 2084 le32_to_cpu(sb->level), 2085 (unsigned long long)le64_to_cpu(sb->size), 2086 le32_to_cpu(sb->raid_disks), 2087 le32_to_cpu(sb->layout), 2088 le32_to_cpu(sb->chunksize), 2089 (unsigned 
long long)le64_to_cpu(sb->data_offset), 2090 (unsigned long long)le64_to_cpu(sb->data_size), 2091 (unsigned long long)le64_to_cpu(sb->super_offset), 2092 (unsigned long long)le64_to_cpu(sb->recovery_offset), 2093 le32_to_cpu(sb->dev_number), 2094 uuid, 2095 sb->devflags, 2096 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, 2097 (unsigned long long)le64_to_cpu(sb->events), 2098 (unsigned long long)le64_to_cpu(sb->resync_offset), 2099 le32_to_cpu(sb->sb_csum), 2100 le32_to_cpu(sb->max_dev) 2101 ); 2102 } 2103 2104 static void print_rdev(mdk_rdev_t *rdev, int major_version) 2105 { 2106 char b[BDEVNAME_SIZE]; 2107 printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", 2108 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors, 2109 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 2110 rdev->desc_nr); 2111 if (rdev->sb_loaded) { 2112 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); 2113 switch (major_version) { 2114 case 0: 2115 print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); 2116 break; 2117 case 1: 2118 print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); 2119 break; 2120 } 2121 } else 2122 printk(KERN_INFO "md: no rdev superblock!\n"); 2123 } 2124 2125 static void md_print_devices(void) 2126 { 2127 struct list_head *tmp; 2128 mdk_rdev_t *rdev; 2129 mddev_t *mddev; 2130 char b[BDEVNAME_SIZE]; 2131 2132 printk("\n"); 2133 printk("md: **********************************\n"); 2134 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); 2135 printk("md: **********************************\n"); 2136 for_each_mddev(mddev, tmp) { 2137 2138 if (mddev->bitmap) 2139 bitmap_print_sb(mddev->bitmap); 2140 else 2141 printk("%s: ", mdname(mddev)); 2142 list_for_each_entry(rdev, &mddev->disks, same_set) 2143 printk("<%s>", bdevname(rdev->bdev,b)); 2144 printk("\n"); 2145 2146 list_for_each_entry(rdev, &mddev->disks, same_set) 2147 print_rdev(rdev, mddev->major_version); 2148 } 2149 printk("md: **********************************\n"); 2150 printk("\n"); 2151 } 2152 2153 2154 static void sync_sbs(mddev_t * mddev, int nospares) 2155 { 2156 /* Update each superblock (in-memory image), but 2157 * if we are allowed to, skip spares which already 2158 * have the right event counter, or have one earlier 2159 * (which would mean they aren't being marked as dirty 2160 * with the rest of the array) 2161 */ 2162 mdk_rdev_t *rdev; 2163 list_for_each_entry(rdev, &mddev->disks, same_set) { 2164 if (rdev->sb_events == mddev->events || 2165 (nospares && 2166 rdev->raid_disk < 0 && 2167 rdev->sb_events+1 == mddev->events)) { 2168 /* Don't update this superblock */ 2169 rdev->sb_loaded = 2; 2170 } else { 2171 super_types[mddev->major_version]. 
2172 sync_super(mddev, rdev); 2173 rdev->sb_loaded = 1; 2174 } 2175 } 2176 } 2177 2178 static void md_update_sb(mddev_t * mddev, int force_change) 2179 { 2180 mdk_rdev_t *rdev; 2181 int sync_req; 2182 int nospares = 0; 2183 2184 repeat: 2185 /* First make sure individual recovery_offsets are correct */ 2186 list_for_each_entry(rdev, &mddev->disks, same_set) { 2187 if (rdev->raid_disk >= 0 && 2188 mddev->delta_disks >= 0 && 2189 !test_bit(In_sync, &rdev->flags) && 2190 mddev->curr_resync_completed > rdev->recovery_offset) 2191 rdev->recovery_offset = mddev->curr_resync_completed; 2192 2193 } 2194 if (!mddev->persistent) { 2195 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2196 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2197 if (!mddev->external) 2198 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2199 wake_up(&mddev->sb_wait); 2200 return; 2201 } 2202 2203 spin_lock_irq(&mddev->write_lock); 2204 2205 mddev->utime = get_seconds(); 2206 2207 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2208 force_change = 1; 2209 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2210 /* just a clean <-> dirty transition, possibly leave spares alone, 2211 * though if events isn't the right even/odd, we will have to do 2212 * spares after all 2213 */ 2214 nospares = 1; 2215 if (force_change) 2216 nospares = 0; 2217 if (mddev->degraded) 2218 /* If the array is degraded, then skipping spares is both 2219 * dangerous and fairly pointless. 2220 * Dangerous because a device that was removed from the array 2221 * might have an event_count that still looks up-to-date, 2222 * so it can be re-added without a resync. 2223 * Pointless because if there are any spares to skip, 2224 * then a recovery will happen and soon that array won't 2225 * be degraded any more and the spare can go back to sleep then. 2226 */ 2227 nospares = 0; 2228 2229 sync_req = mddev->in_sync; 2230 2231 /* If this is just a dirty<->clean transition, and the array is clean 2232 * and 'events' is odd, we can roll back to the previous clean state */ 2233 if (nospares 2234 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2235 && mddev->can_decrease_events 2236 && mddev->events != 1) { 2237 mddev->events--; 2238 mddev->can_decrease_events = 0; 2239 } else { 2240 /* otherwise we have to go forward and ... */ 2241 mddev->events ++; 2242 mddev->can_decrease_events = nospares; 2243 } 2244 2245 if (!mddev->events) { 2246 /* 2247 * oops, this 64-bit counter should never wrap.
2248 * Either we are in around ~1 trillion A.C., assuming 2249 * 1 reboot per second, or we have a bug: 2250 */ 2251 MD_BUG(); 2252 mddev->events --; 2253 } 2254 sync_sbs(mddev, nospares); 2255 spin_unlock_irq(&mddev->write_lock); 2256 2257 dprintk(KERN_INFO 2258 "md: updating %s RAID superblock on device (in sync %d)\n", 2259 mdname(mddev),mddev->in_sync); 2260 2261 bitmap_update_sb(mddev->bitmap); 2262 list_for_each_entry(rdev, &mddev->disks, same_set) { 2263 char b[BDEVNAME_SIZE]; 2264 dprintk(KERN_INFO "md: "); 2265 if (rdev->sb_loaded != 1) 2266 continue; /* no noise on spare devices */ 2267 if (test_bit(Faulty, &rdev->flags)) 2268 dprintk("(skipping faulty "); 2269 2270 dprintk("%s ", bdevname(rdev->bdev,b)); 2271 if (!test_bit(Faulty, &rdev->flags)) { 2272 md_super_write(mddev,rdev, 2273 rdev->sb_start, rdev->sb_size, 2274 rdev->sb_page); 2275 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", 2276 bdevname(rdev->bdev,b), 2277 (unsigned long long)rdev->sb_start); 2278 rdev->sb_events = mddev->events; 2279 2280 } else 2281 dprintk(")\n"); 2282 if (mddev->level == LEVEL_MULTIPATH) 2283 /* only need to write one superblock... */ 2284 break; 2285 } 2286 md_super_wait(mddev); 2287 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2288 2289 spin_lock_irq(&mddev->write_lock); 2290 if (mddev->in_sync != sync_req || 2291 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2292 /* have to write it out again */ 2293 spin_unlock_irq(&mddev->write_lock); 2294 goto repeat; 2295 } 2296 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2297 spin_unlock_irq(&mddev->write_lock); 2298 wake_up(&mddev->sb_wait); 2299 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2300 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2301 2302 } 2303 2304 /* words written to sysfs files may, or may not, be \n terminated. 2305 * We want to accept either case. For this we use cmd_match. 2306 */ 2307 static int cmd_match(const char *cmd, const char *str) 2308 { 2309 /* See if cmd, written into a sysfs file, matches 2310 * str. They must either be the same, or cmd can 2311 * have a trailing newline 2312 */
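/* (Example, matching the loop below: cmd_match("idle\n", "idle") and
 * cmd_match("idle", "idle") both return 1, while
 * cmd_match("idle2", "idle") returns 0.)
 */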
2313 while (*cmd && *str && *cmd == *str) { 2314 cmd++; 2315 str++; 2316 } 2317 if (*cmd == '\n') 2318 cmd++; 2319 if (*str || *cmd) 2320 return 0; 2321 return 1; 2322 } 2323 2324 struct rdev_sysfs_entry { 2325 struct attribute attr; 2326 ssize_t (*show)(mdk_rdev_t *, char *); 2327 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 2328 }; 2329 2330 static ssize_t 2331 state_show(mdk_rdev_t *rdev, char *page) 2332 { 2333 char *sep = ""; 2334 size_t len = 0; 2335 2336 if (test_bit(Faulty, &rdev->flags)) { 2337 len+= sprintf(page+len, "%sfaulty",sep); 2338 sep = ","; 2339 } 2340 if (test_bit(In_sync, &rdev->flags)) { 2341 len += sprintf(page+len, "%sin_sync",sep); 2342 sep = ","; 2343 } 2344 if (test_bit(WriteMostly, &rdev->flags)) { 2345 len += sprintf(page+len, "%swrite_mostly",sep); 2346 sep = ","; 2347 } 2348 if (test_bit(Blocked, &rdev->flags)) { 2349 len += sprintf(page+len, "%sblocked", sep); 2350 sep = ","; 2351 } 2352 if (!test_bit(Faulty, &rdev->flags) && 2353 !test_bit(In_sync, &rdev->flags)) { 2354 len += sprintf(page+len, "%sspare", sep); 2355 sep = ","; 2356 } 2357 return len+sprintf(page+len, "\n"); 2358 } 2359 2360 static ssize_t 2361 state_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2362 { 2363 /* can write 2364 * faulty - simulates an error 2365 * remove - disconnects the device 2366 * writemostly - sets write_mostly 2367 * -writemostly - clears write_mostly 2368 * blocked - sets the Blocked flag 2369 * -blocked - clears the Blocked flag 2370 * insync - sets In_sync provided the device isn't active 2371 */ 2372 int err = -EINVAL; 2373 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2374 md_error(rdev->mddev, rdev); 2375 err = 0; 2376 } else if (cmd_match(buf, "remove")) { 2377 if (rdev->raid_disk >= 0) 2378 err = -EBUSY; 2379 else { 2380 mddev_t *mddev = rdev->mddev; 2381 kick_rdev_from_array(rdev); 2382 if (mddev->pers) 2383 md_update_sb(mddev, 1); 2384 md_new_event(mddev); 2385 err = 0; 2386 } 2387 } else if (cmd_match(buf, "writemostly")) { 2388 set_bit(WriteMostly, &rdev->flags); 2389 err = 0; 2390 } else if (cmd_match(buf, "-writemostly")) { 2391 clear_bit(WriteMostly, &rdev->flags); 2392 err = 0; 2393 } else if (cmd_match(buf, "blocked")) { 2394 set_bit(Blocked, &rdev->flags); 2395 err = 0; 2396 } else if (cmd_match(buf, "-blocked")) { 2397 clear_bit(Blocked, &rdev->flags); 2398 wake_up(&rdev->blocked_wait); 2399 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2400 md_wakeup_thread(rdev->mddev->thread); 2401 2402 err = 0; 2403 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2404 set_bit(In_sync, &rdev->flags); 2405 err = 0; 2406 } 2407 if (!err) 2408 sysfs_notify_dirent_safe(rdev->sysfs_state); 2409 return err ?
err : len; 2410 } 2411 static struct rdev_sysfs_entry rdev_state = 2412 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2413 2414 static ssize_t 2415 errors_show(mdk_rdev_t *rdev, char *page) 2416 { 2417 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2418 } 2419 2420 static ssize_t 2421 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2422 { 2423 char *e; 2424 unsigned long n = simple_strtoul(buf, &e, 10); 2425 if (*buf && (*e == 0 || *e == '\n')) { 2426 atomic_set(&rdev->corrected_errors, n); 2427 return len; 2428 } 2429 return -EINVAL; 2430 } 2431 static struct rdev_sysfs_entry rdev_errors = 2432 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2433 2434 static ssize_t 2435 slot_show(mdk_rdev_t *rdev, char *page) 2436 { 2437 if (rdev->raid_disk < 0) 2438 return sprintf(page, "none\n"); 2439 else 2440 return sprintf(page, "%d\n", rdev->raid_disk); 2441 } 2442 2443 static ssize_t 2444 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2445 { 2446 char *e; 2447 int err; 2448 char nm[20]; 2449 int slot = simple_strtoul(buf, &e, 10); 2450 if (strncmp(buf, "none", 4)==0) 2451 slot = -1; 2452 else if (e==buf || (*e && *e!= '\n')) 2453 return -EINVAL; 2454 if (rdev->mddev->pers && slot == -1) { 2455 /* Setting 'slot' on an active array requires also 2456 * updating the 'rd%d' link, and communicating 2457 * with the personality with ->hot_*_disk. 2458 * For now we only support removing 2459 * failed/spare devices. This normally happens automatically, 2460 * but not when the metadata is externally managed. 2461 */ 2462 if (rdev->raid_disk == -1) 2463 return -EEXIST; 2464 /* personality does all needed checks */ 2465 if (rdev->mddev->pers->hot_add_disk == NULL) 2466 return -EINVAL; 2467 err = rdev->mddev->pers-> 2468 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2469 if (err) 2470 return err; 2471 sprintf(nm, "rd%d", rdev->raid_disk); 2472 sysfs_remove_link(&rdev->mddev->kobj, nm); 2473 rdev->raid_disk = -1; 2474 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2475 md_wakeup_thread(rdev->mddev->thread); 2476 } else if (rdev->mddev->pers) { 2477 mdk_rdev_t *rdev2; 2478 /* Activating a spare .. or possibly reactivating 2479 * if we ever get bitmaps working here. 2480 */ 2481 2482 if (rdev->raid_disk != -1) 2483 return -EBUSY; 2484 2485 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 2486 return -EBUSY; 2487 2488 if (rdev->mddev->pers->hot_add_disk == NULL) 2489 return -EINVAL; 2490 2491 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2492 if (rdev2->raid_disk == slot) 2493 return -EEXIST; 2494 2495 if (slot >= rdev->mddev->raid_disks && 2496 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2497 return -ENOSPC; 2498 2499 rdev->raid_disk = slot; 2500 if (test_bit(In_sync, &rdev->flags)) 2501 rdev->saved_raid_disk = slot; 2502 else 2503 rdev->saved_raid_disk = -1; 2504 err = rdev->mddev->pers-> 2505 hot_add_disk(rdev->mddev, rdev); 2506 if (err) { 2507 rdev->raid_disk = -1; 2508 return err; 2509 } else 2510 sysfs_notify_dirent_safe(rdev->sysfs_state); 2511 sprintf(nm, "rd%d", rdev->raid_disk); 2512 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) 2513 /* failure here is OK */; 2514 /* don't wakeup anyone, leave that to userspace. 
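 * (e.g. "echo 2 > /sys/block/md0/md/dev-sdc/slot" activates a spare
 * in slot 2; mdadm, not the kernel, then decides when recovery runs.)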
*/ 2515 } else { 2516 if (slot >= rdev->mddev->raid_disks && 2517 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2518 return -ENOSPC; 2519 rdev->raid_disk = slot; 2520 /* assume it is working */ 2521 clear_bit(Faulty, &rdev->flags); 2522 clear_bit(WriteMostly, &rdev->flags); 2523 set_bit(In_sync, &rdev->flags); 2524 sysfs_notify_dirent_safe(rdev->sysfs_state); 2525 } 2526 return len; 2527 } 2528 2529 2530 static struct rdev_sysfs_entry rdev_slot = 2531 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2532 2533 static ssize_t 2534 offset_show(mdk_rdev_t *rdev, char *page) 2535 { 2536 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2537 } 2538 2539 static ssize_t 2540 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2541 { 2542 char *e; 2543 unsigned long long offset = simple_strtoull(buf, &e, 10); 2544 if (e==buf || (*e && *e != '\n')) 2545 return -EINVAL; 2546 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2547 return -EBUSY; 2548 if (rdev->sectors && rdev->mddev->external) 2549 /* Must set offset before size, so overlap checks 2550 * can be sane */ 2551 return -EBUSY; 2552 rdev->data_offset = offset; 2553 return len; 2554 } 2555 2556 static struct rdev_sysfs_entry rdev_offset = 2557 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2558 2559 static ssize_t 2560 rdev_size_show(mdk_rdev_t *rdev, char *page) 2561 { 2562 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2563 } 2564 2565 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2566 { 2567 /* check if two start/length pairs overlap */ 2568 if (s1+l1 <= s2) 2569 return 0; 2570 if (s2+l2 <= s1) 2571 return 0; 2572 return 1; 2573 } 2574 2575 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2576 { 2577 unsigned long long blocks; 2578 sector_t new; 2579 2580 if (strict_strtoull(buf, 10, &blocks) < 0) 2581 return -EINVAL; 2582 2583 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2584 return -EINVAL; /* sector conversion overflow */ 2585 2586 new = blocks * 2; 2587 if (new != blocks * 2) 2588 return -EINVAL; /* unsigned long long to sector_t overflow */ 2589 2590 *sectors = new; 2591 return 0; 2592 } 2593 2594 static ssize_t 2595 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2596 { 2597 mddev_t *my_mddev = rdev->mddev; 2598 sector_t oldsectors = rdev->sectors; 2599 sector_t sectors; 2600 2601 if (strict_blocks_to_sectors(buf, &sectors) < 0) 2602 return -EINVAL; 2603 if (my_mddev->pers && rdev->raid_disk >= 0) { 2604 if (my_mddev->persistent) { 2605 sectors = super_types[my_mddev->major_version]. 2606 rdev_size_change(rdev, sectors); 2607 if (!sectors) 2608 return -EBUSY; 2609 } else if (!sectors) 2610 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 2611 rdev->data_offset; 2612 } 2613 if (sectors < my_mddev->dev_sectors) 2614 return -EINVAL; /* component must fit device */ 2615 2616 rdev->sectors = sectors; 2617 if (sectors > oldsectors && my_mddev->external) { 2618 /* need to check that all other rdevs with the same ->bdev 2619 * do not overlap. We need to unlock the mddev to avoid 2620 * a deadlock. We have already changed rdev->sectors, and if 2621 * we have to change it back, we will have the lock again.
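 * (Note: overlaps() above treats each start/length pair as a
 * half-open range [start, start+len), so extents that merely touch
 * do not count as overlapping.)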
2622 */ 2623 mddev_t *mddev; 2624 int overlap = 0; 2625 struct list_head *tmp; 2626 2627 mddev_unlock(my_mddev); 2628 for_each_mddev(mddev, tmp) { 2629 mdk_rdev_t *rdev2; 2630 2631 mddev_lock(mddev); 2632 list_for_each_entry(rdev2, &mddev->disks, same_set) 2633 if (rdev->bdev == rdev2->bdev && 2634 rdev != rdev2 && 2635 overlaps(rdev->data_offset, rdev->sectors, 2636 rdev2->data_offset, 2637 rdev2->sectors)) { 2638 overlap = 1; 2639 break; 2640 } 2641 mddev_unlock(mddev); 2642 if (overlap) { 2643 mddev_put(mddev); 2644 break; 2645 } 2646 } 2647 mddev_lock(my_mddev); 2648 if (overlap) { 2649 /* Someone else could have slipped in a size 2650 * change here, but doing so is just silly. 2651 * We put oldsectors back because we *know* it is 2652 * safe, and trust userspace not to race with 2653 * itself 2654 */ 2655 rdev->sectors = oldsectors; 2656 return -EBUSY; 2657 } 2658 } 2659 return len; 2660 } 2661 2662 static struct rdev_sysfs_entry rdev_size = 2663 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2664 2665 2666 static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) 2667 { 2668 unsigned long long recovery_start = rdev->recovery_offset; 2669 2670 if (test_bit(In_sync, &rdev->flags) || 2671 recovery_start == MaxSector) 2672 return sprintf(page, "none\n"); 2673 2674 return sprintf(page, "%llu\n", recovery_start); 2675 } 2676 2677 static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2678 { 2679 unsigned long long recovery_start; 2680 2681 if (cmd_match(buf, "none")) 2682 recovery_start = MaxSector; 2683 else if (strict_strtoull(buf, 10, &recovery_start)) 2684 return -EINVAL; 2685 2686 if (rdev->mddev->pers && 2687 rdev->raid_disk >= 0) 2688 return -EBUSY; 2689 2690 rdev->recovery_offset = recovery_start; 2691 if (recovery_start == MaxSector) 2692 set_bit(In_sync, &rdev->flags); 2693 else 2694 clear_bit(In_sync, &rdev->flags); 2695 return len; 2696 } 2697 2698 static struct rdev_sysfs_entry rdev_recovery_start = 2699 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2700 2701 static struct attribute *rdev_default_attrs[] = { 2702 &rdev_state.attr, 2703 &rdev_errors.attr, 2704 &rdev_slot.attr, 2705 &rdev_offset.attr, 2706 &rdev_size.attr, 2707 &rdev_recovery_start.attr, 2708 NULL, 2709 }; 2710 static ssize_t 2711 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2712 { 2713 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2714 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2715 mddev_t *mddev = rdev->mddev; 2716 ssize_t rv; 2717 2718 if (!entry->show) 2719 return -EIO; 2720 2721 rv = mddev ? mddev_lock(mddev) : -EBUSY; 2722 if (!rv) { 2723 if (rdev->mddev == NULL) 2724 rv = -EBUSY; 2725 else 2726 rv = entry->show(rdev, page); 2727 mddev_unlock(mddev); 2728 } 2729 return rv; 2730 } 2731 2732 static ssize_t 2733 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 2734 const char *page, size_t length) 2735 { 2736 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2737 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2738 ssize_t rv; 2739 mddev_t *mddev = rdev->mddev; 2740 2741 if (!entry->store) 2742 return -EIO; 2743 if (!capable(CAP_SYS_ADMIN)) 2744 return -EACCES; 2745 rv = mddev ? 
mddev_lock(mddev): -EBUSY; 2746 if (!rv) { 2747 if (rdev->mddev == NULL) 2748 rv = -EBUSY; 2749 else 2750 rv = entry->store(rdev, page, length); 2751 mddev_unlock(mddev); 2752 } 2753 return rv; 2754 } 2755 2756 static void rdev_free(struct kobject *ko) 2757 { 2758 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 2759 kfree(rdev); 2760 } 2761 static const struct sysfs_ops rdev_sysfs_ops = { 2762 .show = rdev_attr_show, 2763 .store = rdev_attr_store, 2764 }; 2765 static struct kobj_type rdev_ktype = { 2766 .release = rdev_free, 2767 .sysfs_ops = &rdev_sysfs_ops, 2768 .default_attrs = rdev_default_attrs, 2769 }; 2770 2771 void md_rdev_init(mdk_rdev_t *rdev) 2772 { 2773 rdev->desc_nr = -1; 2774 rdev->saved_raid_disk = -1; 2775 rdev->raid_disk = -1; 2776 rdev->flags = 0; 2777 rdev->data_offset = 0; 2778 rdev->sb_events = 0; 2779 rdev->last_read_error.tv_sec = 0; 2780 rdev->last_read_error.tv_nsec = 0; 2781 atomic_set(&rdev->nr_pending, 0); 2782 atomic_set(&rdev->read_errors, 0); 2783 atomic_set(&rdev->corrected_errors, 0); 2784 2785 INIT_LIST_HEAD(&rdev->same_set); 2786 init_waitqueue_head(&rdev->blocked_wait); 2787 } 2788 EXPORT_SYMBOL_GPL(md_rdev_init); 2789 /* 2790 * Import a device. If 'super_format' >= 0, then sanity check the superblock 2791 * 2792 * mark the device faulty if: 2793 * 2794 * - the device is nonexistent (zero size) 2795 * - the device has no valid superblock 2796 * 2797 * a faulty rdev _never_ has rdev->sb set. 2798 */ 2799 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 2800 { 2801 char b[BDEVNAME_SIZE]; 2802 int err; 2803 mdk_rdev_t *rdev; 2804 sector_t size; 2805 2806 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 2807 if (!rdev) { 2808 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 2809 return ERR_PTR(-ENOMEM); 2810 } 2811 2812 md_rdev_init(rdev); 2813 if ((err = alloc_disk_sb(rdev))) 2814 goto abort_free; 2815 2816 err = lock_rdev(rdev, newdev, super_format == -2); 2817 if (err) 2818 goto abort_free; 2819 2820 kobject_init(&rdev->kobj, &rdev_ktype); 2821 2822 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 2823 if (!size) { 2824 printk(KERN_WARNING 2825 "md: %s has zero or unknown size, marking faulty!\n", 2826 bdevname(rdev->bdev,b)); 2827 err = -EINVAL; 2828 goto abort_free; 2829 } 2830 2831 if (super_format >= 0) { 2832 err = super_types[super_format]. 2833 load_super(rdev, NULL, super_minor); 2834 if (err == -EINVAL) { 2835 printk(KERN_WARNING 2836 "md: %s does not have a valid v%d.%d " 2837 "superblock, not importing!\n", 2838 bdevname(rdev->bdev,b), 2839 super_format, super_minor); 2840 goto abort_free; 2841 } 2842 if (err < 0) { 2843 printk(KERN_WARNING 2844 "md: could not read %s's sb, not importing!\n", 2845 bdevname(rdev->bdev,b)); 2846 goto abort_free; 2847 } 2848 } 2849 2850 return rdev; 2851 2852 abort_free: 2853 if (rdev->sb_page) { 2854 if (rdev->bdev) 2855 unlock_rdev(rdev); 2856 free_disk_sb(rdev); 2857 } 2858 kfree(rdev); 2859 return ERR_PTR(err); 2860 } 2861 2862 /* 2863 * Check a full RAID array for plausibility 2864 */ 2865 2866 2867 static void analyze_sbs(mddev_t * mddev) 2868 { 2869 int i; 2870 mdk_rdev_t *rdev, *freshest, *tmp; 2871 char b[BDEVNAME_SIZE]; 2872 2873 freshest = NULL; 2874 rdev_for_each(rdev, tmp, mddev) 2875 switch (super_types[mddev->major_version]. 
2876 load_super(rdev, freshest, mddev->minor_version)) { 2877 case 1: 2878 freshest = rdev; 2879 break; 2880 case 0: 2881 break; 2882 default: 2883 printk( KERN_ERR \ 2884 "md: fatal superblock inconsistency in %s" 2885 " -- removing from array\n", 2886 bdevname(rdev->bdev,b)); 2887 kick_rdev_from_array(rdev); 2888 } 2889 2890 2891 super_types[mddev->major_version]. 2892 validate_super(mddev, freshest); 2893 2894 i = 0; 2895 rdev_for_each(rdev, tmp, mddev) { 2896 if (mddev->max_disks && 2897 (rdev->desc_nr >= mddev->max_disks || 2898 i > mddev->max_disks)) { 2899 printk(KERN_WARNING 2900 "md: %s: %s: only %d devices permitted\n", 2901 mdname(mddev), bdevname(rdev->bdev, b), 2902 mddev->max_disks); 2903 kick_rdev_from_array(rdev); 2904 continue; 2905 } 2906 if (rdev != freshest) 2907 if (super_types[mddev->major_version]. 2908 validate_super(mddev, rdev)) { 2909 printk(KERN_WARNING "md: kicking non-fresh %s" 2910 " from array!\n", 2911 bdevname(rdev->bdev,b)); 2912 kick_rdev_from_array(rdev); 2913 continue; 2914 } 2915 if (mddev->level == LEVEL_MULTIPATH) { 2916 rdev->desc_nr = i++; 2917 rdev->raid_disk = rdev->desc_nr; 2918 set_bit(In_sync, &rdev->flags); 2919 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { 2920 rdev->raid_disk = -1; 2921 clear_bit(In_sync, &rdev->flags); 2922 } 2923 } 2924 } 2925 2926 /* Read a fixed-point number. 2927 * Numbers in sysfs attributes should be in "standard" units where 2928 * possible, so time should be in seconds. 2929 * However we internally use a much smaller unit such as 2930 * milliseconds or jiffies. 2931 * This function takes a decimal number with a possible fractional 2932 * component, and produces an integer which is the result of 2933 * multiplying that number by 10^'scale', 2934 * all without any floating-point arithmetic. * For example, "1.34" with a scale of 3 yields 1340. 2935 */ 2936 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 2937 { 2938 unsigned long result = 0; 2939 long decimals = -1; 2940 while (isdigit(*cp) || (*cp == '.'
&& decimals < 0)) { 2941 if (*cp == '.') 2942 decimals = 0; 2943 else if (decimals < scale) { 2944 unsigned int value; 2945 value = *cp - '0'; 2946 result = result * 10 + value; 2947 if (decimals >= 0) 2948 decimals++; 2949 } 2950 cp++; 2951 } 2952 if (*cp == '\n') 2953 cp++; 2954 if (*cp) 2955 return -EINVAL; 2956 if (decimals < 0) 2957 decimals = 0; 2958 while (decimals < scale) { 2959 result *= 10; 2960 decimals ++; 2961 } 2962 *res = result; 2963 return 0; 2964 } 2965 2966 2967 static void md_safemode_timeout(unsigned long data); 2968 2969 static ssize_t 2970 safe_delay_show(mddev_t *mddev, char *page) 2971 { 2972 int msec = (mddev->safemode_delay*1000)/HZ; 2973 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 2974 } 2975 static ssize_t 2976 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 2977 { 2978 unsigned long msec; 2979 2980 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 2981 return -EINVAL; 2982 if (msec == 0) 2983 mddev->safemode_delay = 0; 2984 else { 2985 unsigned long old_delay = mddev->safemode_delay; 2986 mddev->safemode_delay = (msec*HZ)/1000; 2987 if (mddev->safemode_delay == 0) 2988 mddev->safemode_delay = 1; 2989 if (mddev->safemode_delay < old_delay) 2990 md_safemode_timeout((unsigned long)mddev); 2991 } 2992 return len; 2993 } 2994 static struct md_sysfs_entry md_safe_delay = 2995 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 2996 2997 static ssize_t 2998 level_show(mddev_t *mddev, char *page) 2999 { 3000 struct mdk_personality *p = mddev->pers; 3001 if (p) 3002 return sprintf(page, "%s\n", p->name); 3003 else if (mddev->clevel[0]) 3004 return sprintf(page, "%s\n", mddev->clevel); 3005 else if (mddev->level != LEVEL_NONE) 3006 return sprintf(page, "%d\n", mddev->level); 3007 else 3008 return 0; 3009 } 3010 3011 static ssize_t 3012 level_store(mddev_t *mddev, const char *buf, size_t len) 3013 { 3014 char clevel[16]; 3015 ssize_t rv = len; 3016 struct mdk_personality *pers; 3017 long level; 3018 void *priv; 3019 mdk_rdev_t *rdev; 3020 3021 if (mddev->pers == NULL) { 3022 if (len == 0) 3023 return 0; 3024 if (len >= sizeof(mddev->clevel)) 3025 return -ENOSPC; 3026 strncpy(mddev->clevel, buf, len); 3027 if (mddev->clevel[len-1] == '\n') 3028 len--; 3029 mddev->clevel[len] = 0; 3030 mddev->level = LEVEL_NONE; 3031 return rv; 3032 } 3033 3034 /* request to change the personality. Need to ensure: 3035 * - array is not engaged in resync/recovery/reshape 3036 * - old personality can be suspended 3037 * - new personality will access other array. 
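 * (e.g. "echo raid5 > /sys/block/md0/md/level" asks the raid5
 * personality to take the array over; this only succeeds if that
 * personality provides ->takeover for the array's current level.)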
3038 */ 3039 3040 if (mddev->sync_thread || 3041 mddev->reshape_position != MaxSector || 3042 mddev->sysfs_active) 3043 return -EBUSY; 3044 3045 if (!mddev->pers->quiesce) { 3046 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3047 mdname(mddev), mddev->pers->name); 3048 return -EINVAL; 3049 } 3050 3051 /* Now find the new personality */ 3052 if (len == 0 || len >= sizeof(clevel)) 3053 return -EINVAL; 3054 strncpy(clevel, buf, len); 3055 if (clevel[len-1] == '\n') 3056 len--; 3057 clevel[len] = 0; 3058 if (strict_strtol(clevel, 10, &level)) 3059 level = LEVEL_NONE; 3060 3061 if (request_module("md-%s", clevel) != 0) 3062 request_module("md-level-%s", clevel); 3063 spin_lock(&pers_lock); 3064 pers = find_pers(level, clevel); 3065 if (!pers || !try_module_get(pers->owner)) { 3066 spin_unlock(&pers_lock); 3067 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3068 return -EINVAL; 3069 } 3070 spin_unlock(&pers_lock); 3071 3072 if (pers == mddev->pers) { 3073 /* Nothing to do! */ 3074 module_put(pers->owner); 3075 return rv; 3076 } 3077 if (!pers->takeover) { 3078 module_put(pers->owner); 3079 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3080 mdname(mddev), clevel); 3081 return -EINVAL; 3082 } 3083 3084 list_for_each_entry(rdev, &mddev->disks, same_set) 3085 rdev->new_raid_disk = rdev->raid_disk; 3086 3087 /* ->takeover must set new_* and/or delta_disks 3088 * if it succeeds, and may set them when it fails. 3089 */ 3090 priv = pers->takeover(mddev); 3091 if (IS_ERR(priv)) { 3092 mddev->new_level = mddev->level; 3093 mddev->new_layout = mddev->layout; 3094 mddev->new_chunk_sectors = mddev->chunk_sectors; 3095 mddev->raid_disks -= mddev->delta_disks; 3096 mddev->delta_disks = 0; 3097 module_put(pers->owner); 3098 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3099 mdname(mddev), clevel); 3100 return PTR_ERR(priv); 3101 } 3102 3103 /* Looks like we have a winner */ 3104 mddev_suspend(mddev); 3105 mddev->pers->stop(mddev); 3106 3107 if (mddev->pers->sync_request == NULL && 3108 pers->sync_request != NULL) { 3109 /* need to add the md_redundancy_group */ 3110 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3111 printk(KERN_WARNING 3112 "md: cannot register extra attributes for %s\n", 3113 mdname(mddev)); 3114 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action"); 3115 } 3116 if (mddev->pers->sync_request != NULL && 3117 pers->sync_request == NULL) { 3118 /* need to remove the md_redundancy_group */ 3119 if (mddev->to_remove == NULL) 3120 mddev->to_remove = &md_redundancy_group; 3121 } 3122 3123 if (mddev->pers->sync_request == NULL && 3124 mddev->external) { 3125 /* We are converting from a no-redundancy array 3126 * to a redundancy array and metadata is managed 3127 * externally so we need to be sure that writes 3128 * won't block due to a need to transition 3129 * clean->dirty 3130 * until external management is started. 
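 * (Typically the external manager then sees 'write-pending' in
 * array_state and writes 'active' to release the blocked writes.)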
3131 */ 3132 mddev->in_sync = 0; 3133 mddev->safemode_delay = 0; 3134 mddev->safemode = 0; 3135 } 3136 3137 list_for_each_entry(rdev, &mddev->disks, same_set) { 3138 char nm[20]; 3139 if (rdev->raid_disk < 0) 3140 continue; 3141 if (rdev->new_raid_disk >= mddev->raid_disks) 3142 rdev->new_raid_disk = -1; 3143 if (rdev->new_raid_disk == rdev->raid_disk) 3144 continue; 3145 sprintf(nm, "rd%d", rdev->raid_disk); 3146 sysfs_remove_link(&mddev->kobj, nm); 3147 } 3148 list_for_each_entry(rdev, &mddev->disks, same_set) { 3149 if (rdev->raid_disk < 0) 3150 continue; 3151 if (rdev->new_raid_disk == rdev->raid_disk) 3152 continue; 3153 rdev->raid_disk = rdev->new_raid_disk; 3154 if (rdev->raid_disk < 0) 3155 clear_bit(In_sync, &rdev->flags); 3156 else { 3157 char nm[20]; 3158 sprintf(nm, "rd%d", rdev->raid_disk); 3159 if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 3160 printk("md: cannot register %s for %s after level change\n", 3161 nm, mdname(mddev)); 3162 } 3163 } 3164 3165 module_put(mddev->pers->owner); 3166 mddev->pers = pers; 3167 mddev->private = priv; 3168 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3169 mddev->level = mddev->new_level; 3170 mddev->layout = mddev->new_layout; 3171 mddev->chunk_sectors = mddev->new_chunk_sectors; 3172 mddev->delta_disks = 0; 3173 mddev->degraded = 0; 3174 if (mddev->pers->sync_request == NULL) { 3175 /* this is now an array without redundancy, so 3176 * it must always be in_sync 3177 */ 3178 mddev->in_sync = 1; 3179 del_timer_sync(&mddev->safemode_timer); 3180 } 3181 pers->run(mddev); 3182 mddev_resume(mddev); 3183 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3184 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3185 md_wakeup_thread(mddev->thread); 3186 sysfs_notify(&mddev->kobj, NULL, "level"); 3187 md_new_event(mddev); 3188 return rv; 3189 } 3190 3191 static struct md_sysfs_entry md_level = 3192 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3193 3194 3195 static ssize_t 3196 layout_show(mddev_t *mddev, char *page) 3197 { 3198 /* just a number, not meaningful for all levels */ 3199 if (mddev->reshape_position != MaxSector && 3200 mddev->layout != mddev->new_layout) 3201 return sprintf(page, "%d (%d)\n", 3202 mddev->new_layout, mddev->layout); 3203 return sprintf(page, "%d\n", mddev->layout); 3204 } 3205 3206 static ssize_t 3207 layout_store(mddev_t *mddev, const char *buf, size_t len) 3208 { 3209 char *e; 3210 unsigned long n = simple_strtoul(buf, &e, 10); 3211 3212 if (!*buf || (*e && *e != '\n')) 3213 return -EINVAL; 3214 3215 if (mddev->pers) { 3216 int err; 3217 if (mddev->pers->check_reshape == NULL) 3218 return -EBUSY; 3219 mddev->new_layout = n; 3220 err = mddev->pers->check_reshape(mddev); 3221 if (err) { 3222 mddev->new_layout = mddev->layout; 3223 return err; 3224 } 3225 } else { 3226 mddev->new_layout = n; 3227 if (mddev->reshape_position == MaxSector) 3228 mddev->layout = n; 3229 } 3230 return len; 3231 } 3232 static struct md_sysfs_entry md_layout = 3233 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3234 3235 3236 static ssize_t 3237 raid_disks_show(mddev_t *mddev, char *page) 3238 { 3239 if (mddev->raid_disks == 0) 3240 return 0; 3241 if (mddev->reshape_position != MaxSector && 3242 mddev->delta_disks != 0) 3243 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3244 mddev->raid_disks - mddev->delta_disks); 3245 return sprintf(page, "%d\n", mddev->raid_disks); 3246 } 3247 3248 static int update_raid_disks(mddev_t *mddev, int raid_disks); 3249 3250 static ssize_t 3251 raid_disks_store(mddev_t *mddev, 
const char *buf, size_t len) 3252 { 3253 char *e; 3254 int rv = 0; 3255 unsigned long n = simple_strtoul(buf, &e, 10); 3256 3257 if (!*buf || (*e && *e != '\n')) 3258 return -EINVAL; 3259 3260 if (mddev->pers) 3261 rv = update_raid_disks(mddev, n); 3262 else if (mddev->reshape_position != MaxSector) { 3263 int olddisks = mddev->raid_disks - mddev->delta_disks; 3264 mddev->delta_disks = n - olddisks; 3265 mddev->raid_disks = n; 3266 } else 3267 mddev->raid_disks = n; 3268 return rv ? rv : len; 3269 } 3270 static struct md_sysfs_entry md_raid_disks = 3271 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3272 3273 static ssize_t 3274 chunk_size_show(mddev_t *mddev, char *page) 3275 { 3276 if (mddev->reshape_position != MaxSector && 3277 mddev->chunk_sectors != mddev->new_chunk_sectors) 3278 return sprintf(page, "%d (%d)\n", 3279 mddev->new_chunk_sectors << 9, 3280 mddev->chunk_sectors << 9); 3281 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3282 } 3283 3284 static ssize_t 3285 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 3286 { 3287 char *e; 3288 unsigned long n = simple_strtoul(buf, &e, 10); 3289 3290 if (!*buf || (*e && *e != '\n')) 3291 return -EINVAL; 3292 3293 if (mddev->pers) { 3294 int err; 3295 if (mddev->pers->check_reshape == NULL) 3296 return -EBUSY; 3297 mddev->new_chunk_sectors = n >> 9; 3298 err = mddev->pers->check_reshape(mddev); 3299 if (err) { 3300 mddev->new_chunk_sectors = mddev->chunk_sectors; 3301 return err; 3302 } 3303 } else { 3304 mddev->new_chunk_sectors = n >> 9; 3305 if (mddev->reshape_position == MaxSector) 3306 mddev->chunk_sectors = n >> 9; 3307 } 3308 return len; 3309 } 3310 static struct md_sysfs_entry md_chunk_size = 3311 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3312 3313 static ssize_t 3314 resync_start_show(mddev_t *mddev, char *page) 3315 { 3316 if (mddev->recovery_cp == MaxSector) 3317 return sprintf(page, "none\n"); 3318 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 3319 } 3320 3321 static ssize_t 3322 resync_start_store(mddev_t *mddev, const char *buf, size_t len) 3323 { 3324 char *e; 3325 unsigned long long n = simple_strtoull(buf, &e, 10); 3326 3327 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3328 return -EBUSY; 3329 if (cmd_match(buf, "none")) 3330 n = MaxSector; 3331 else if (!*buf || (*e && *e != '\n')) 3332 return -EINVAL; 3333 3334 mddev->recovery_cp = n; 3335 return len; 3336 } 3337 static struct md_sysfs_entry md_resync_start = 3338 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 3339 3340 /* 3341 * The array state can be: 3342 * 3343 * clear 3344 * No devices, no size, no level 3345 * Equivalent to STOP_ARRAY ioctl 3346 * inactive 3347 * May have some settings, but array is not active 3348 * all IO results in error 3349 * When written, doesn't tear down array, but just stops it 3350 * suspended (not supported yet) 3351 * All IO requests will block. The array can be reconfigured. 3352 * Writing this, if accepted, will block until array is quiescent 3353 * readonly 3354 * no resync can happen. no superblocks get written. 3355 * write requests fail 3356 * read-auto 3357 * like readonly, but behaves like 'clean' on a write request. 3358 * 3359 * clean - no pending writes, but otherwise active. 3360 * When written to inactive array, starts without resync 3361 * If a write request arrives then 3362 * if metadata is known, mark 'dirty' and switch to 'active'. 
3363 * if not known, block and switch to write-pending 3364 * If written to an active array that has pending writes, then fails. 3365 * active 3366 * fully active: IO and resync can be happening. 3367 * When written to inactive array, starts with resync 3368 * 3369 * write-pending 3370 * clean, but writes are blocked waiting for 'active' to be written. 3371 * 3372 * active-idle 3373 * like active, but no writes have been seen for a while (100msec). 3374 * 3375 */ 3376 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 3377 write_pending, active_idle, bad_word}; 3378 static char *array_states[] = { 3379 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 3380 "write-pending", "active-idle", NULL }; 3381 3382 static int match_word(const char *word, char **list) 3383 { 3384 int n; 3385 for (n=0; list[n]; n++) 3386 if (cmd_match(word, list[n])) 3387 break; 3388 return n; 3389 } 3390 3391 static ssize_t 3392 array_state_show(mddev_t *mddev, char *page) 3393 { 3394 enum array_state st = inactive; 3395 3396 if (mddev->pers) 3397 switch(mddev->ro) { 3398 case 1: 3399 st = readonly; 3400 break; 3401 case 2: 3402 st = read_auto; 3403 break; 3404 case 0: 3405 if (mddev->in_sync) 3406 st = clean; 3407 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 3408 st = write_pending; 3409 else if (mddev->safemode) 3410 st = active_idle; 3411 else 3412 st = active; 3413 } 3414 else { 3415 if (list_empty(&mddev->disks) && 3416 mddev->raid_disks == 0 && 3417 mddev->dev_sectors == 0) 3418 st = clear; 3419 else 3420 st = inactive; 3421 } 3422 return sprintf(page, "%s\n", array_states[st]); 3423 } 3424 3425 static int do_md_stop(mddev_t * mddev, int ro, int is_open); 3426 static int md_set_readonly(mddev_t * mddev, int is_open); 3427 static int do_md_run(mddev_t * mddev); 3428 static int restart_array(mddev_t *mddev); 3429 3430 static ssize_t 3431 array_state_store(mddev_t *mddev, const char *buf, size_t len) 3432 { 3433 int err = -EINVAL; 3434 enum array_state st = match_word(buf, array_states); 3435 switch(st) { 3436 case bad_word: 3437 break; 3438 case clear: 3439 /* stopping an active array */ 3440 if (atomic_read(&mddev->openers) > 0) 3441 return -EBUSY; 3442 err = do_md_stop(mddev, 0, 0); 3443 break; 3444 case inactive: 3445 /* stopping an active array */ 3446 if (mddev->pers) { 3447 if (atomic_read(&mddev->openers) > 0) 3448 return -EBUSY; 3449 err = do_md_stop(mddev, 2, 0); 3450 } else 3451 err = 0; /* already inactive */ 3452 break; 3453 case suspended: 3454 break; /* not supported yet */ 3455 case readonly: 3456 if (mddev->pers) 3457 err = md_set_readonly(mddev, 0); 3458 else { 3459 mddev->ro = 1; 3460 set_disk_ro(mddev->gendisk, 1); 3461 err = do_md_run(mddev); 3462 } 3463 break; 3464 case read_auto: 3465 if (mddev->pers) { 3466 if (mddev->ro == 0) 3467 err = md_set_readonly(mddev, 0); 3468 else if (mddev->ro == 1) 3469 err = restart_array(mddev); 3470 if (err == 0) { 3471 mddev->ro = 2; 3472 set_disk_ro(mddev->gendisk, 0); 3473 } 3474 } else { 3475 mddev->ro = 2; 3476 err = do_md_run(mddev); 3477 } 3478 break; 3479 case clean: 3480 if (mddev->pers) { 3481 restart_array(mddev); 3482 spin_lock_irq(&mddev->write_lock); 3483 if (atomic_read(&mddev->writes_pending) == 0) { 3484 if (mddev->in_sync == 0) { 3485 mddev->in_sync = 1; 3486 if (mddev->safemode == 1) 3487 mddev->safemode = 0; 3488 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3489 } 3490 err = 0; 3491 } else 3492 err = -EBUSY; 3493 spin_unlock_irq(&mddev->write_lock); 3494 } else 3495 err = 
-EINVAL; 3496 break; 3497 case active: 3498 if (mddev->pers) { 3499 restart_array(mddev); 3500 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3501 wake_up(&mddev->sb_wait); 3502 err = 0; 3503 } else { 3504 mddev->ro = 0; 3505 set_disk_ro(mddev->gendisk, 0); 3506 err = do_md_run(mddev); 3507 } 3508 break; 3509 case write_pending: 3510 case active_idle: 3511 /* these cannot be set */ 3512 break; 3513 } 3514 if (err) 3515 return err; 3516 else { 3517 sysfs_notify_dirent_safe(mddev->sysfs_state); 3518 return len; 3519 } 3520 } 3521 static struct md_sysfs_entry md_array_state = 3522 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3523 3524 static ssize_t 3525 max_corrected_read_errors_show(mddev_t *mddev, char *page) { 3526 return sprintf(page, "%d\n", 3527 atomic_read(&mddev->max_corr_read_errors)); 3528 } 3529 3530 static ssize_t 3531 max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len) 3532 { 3533 char *e; 3534 unsigned long n = simple_strtoul(buf, &e, 10); 3535 3536 if (*buf && (*e == 0 || *e == '\n')) { 3537 atomic_set(&mddev->max_corr_read_errors, n); 3538 return len; 3539 } 3540 return -EINVAL; 3541 } 3542 3543 static struct md_sysfs_entry max_corr_read_errors = 3544 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 3545 max_corrected_read_errors_store); 3546 3547 static ssize_t 3548 null_show(mddev_t *mddev, char *page) 3549 { 3550 return -EINVAL; 3551 } 3552 3553 static ssize_t 3554 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 3555 { 3556 /* buf must be %d:%d\n? giving major and minor numbers */ 3557 /* The new device is added to the array. 3558 * If the array has a persistent superblock, we read the 3559 * superblock to initialise info and check validity. 3560 * Otherwise, only checking done is that in bind_rdev_to_array, 3561 * which mainly checks size. 3562 */ 3563 char *e; 3564 int major = simple_strtoul(buf, &e, 10); 3565 int minor; 3566 dev_t dev; 3567 mdk_rdev_t *rdev; 3568 int err; 3569 3570 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3571 return -EINVAL; 3572 minor = simple_strtoul(e+1, &e, 10); 3573 if (*e && *e != '\n') 3574 return -EINVAL; 3575 dev = MKDEV(major, minor); 3576 if (major != MAJOR(dev) || 3577 minor != MINOR(dev)) 3578 return -EOVERFLOW; 3579 3580 3581 if (mddev->persistent) { 3582 rdev = md_import_device(dev, mddev->major_version, 3583 mddev->minor_version); 3584 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3585 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3586 mdk_rdev_t, same_set); 3587 err = super_types[mddev->major_version] 3588 .load_super(rdev, rdev0, mddev->minor_version); 3589 if (err < 0) 3590 goto out; 3591 } 3592 } else if (mddev->external) 3593 rdev = md_import_device(dev, -2, -1); 3594 else 3595 rdev = md_import_device(dev, -1, -1); 3596 3597 if (IS_ERR(rdev)) 3598 return PTR_ERR(rdev); 3599 err = bind_rdev_to_array(rdev, mddev); 3600 out: 3601 if (err) 3602 export_rdev(rdev); 3603 return err ? err : len; 3604 } 3605 3606 static struct md_sysfs_entry md_new_device = 3607 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 3608 3609 static ssize_t 3610 bitmap_store(mddev_t *mddev, const char *buf, size_t len) 3611 { 3612 char *end; 3613 unsigned long chunk, end_chunk; 3614 3615 if (!mddev->bitmap) 3616 goto out; 3617 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... 
(range) */ 3618 while (*buf) { 3619 chunk = end_chunk = simple_strtoul(buf, &end, 0); 3620 if (buf == end) break; 3621 if (*end == '-') { /* range */ 3622 buf = end + 1; 3623 end_chunk = simple_strtoul(buf, &end, 0); 3624 if (buf == end) break; 3625 } 3626 if (*end && !isspace(*end)) break; 3627 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3628 buf = skip_spaces(end); 3629 } 3630 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3631 out: 3632 return len; 3633 } 3634 3635 static struct md_sysfs_entry md_bitmap = 3636 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3637 3638 static ssize_t 3639 size_show(mddev_t *mddev, char *page) 3640 { 3641 return sprintf(page, "%llu\n", 3642 (unsigned long long)mddev->dev_sectors / 2); 3643 } 3644 3645 static int update_size(mddev_t *mddev, sector_t num_sectors); 3646 3647 static ssize_t 3648 size_store(mddev_t *mddev, const char *buf, size_t len) 3649 { 3650 /* If array is inactive, we can reduce the component size, but 3651 * not increase it (except from 0). 3652 * If array is active, we can try an on-line resize 3653 */ 3654 sector_t sectors; 3655 int err = strict_blocks_to_sectors(buf, &sectors); 3656 3657 if (err < 0) 3658 return err; 3659 if (mddev->pers) { 3660 err = update_size(mddev, sectors); 3661 md_update_sb(mddev, 1); 3662 } else { 3663 if (mddev->dev_sectors == 0 || 3664 mddev->dev_sectors > sectors) 3665 mddev->dev_sectors = sectors; 3666 else 3667 err = -ENOSPC; 3668 } 3669 return err ? err : len; 3670 } 3671 3672 static struct md_sysfs_entry md_size = 3673 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3674 3675 3676 /* Metadata version. 3677 * This is one of 3678 * 'none' for arrays with no metadata (good luck...) 3679 * 'external' for arrays with externally managed metadata, 3680 * or N.M for internally known formats 3681 */ 3682 static ssize_t 3683 metadata_show(mddev_t *mddev, char *page) 3684 { 3685 if (mddev->persistent) 3686 return sprintf(page, "%d.%d\n", 3687 mddev->major_version, mddev->minor_version); 3688 else if (mddev->external) 3689 return sprintf(page, "external:%s\n", mddev->metadata_type); 3690 else 3691 return sprintf(page, "none\n"); 3692 } 3693 3694 static ssize_t 3695 metadata_store(mddev_t *mddev, const char *buf, size_t len) 3696 { 3697 int major, minor; 3698 char *e; 3699 /* Changing the details of 'external' metadata is 3700 * always permitted. Otherwise there must be 3701 * no devices attached to the array.
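 * (e.g. "echo 0.90 > metadata_version" selects the in-kernel v0.90
 * superblock format, while "echo external:imsm > metadata_version"
 * hands metadata updates to a userspace manager such as mdmon.)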
3702 */ 3703 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3704 ; 3705 else if (!list_empty(&mddev->disks)) 3706 return -EBUSY; 3707 3708 if (cmd_match(buf, "none")) { 3709 mddev->persistent = 0; 3710 mddev->external = 0; 3711 mddev->major_version = 0; 3712 mddev->minor_version = 90; 3713 return len; 3714 } 3715 if (strncmp(buf, "external:", 9) == 0) { 3716 size_t namelen = len-9; 3717 if (namelen >= sizeof(mddev->metadata_type)) 3718 namelen = sizeof(mddev->metadata_type)-1; 3719 strncpy(mddev->metadata_type, buf+9, namelen); 3720 mddev->metadata_type[namelen] = 0; 3721 if (namelen && mddev->metadata_type[namelen-1] == '\n') 3722 mddev->metadata_type[--namelen] = 0; 3723 mddev->persistent = 0; 3724 mddev->external = 1; 3725 mddev->major_version = 0; 3726 mddev->minor_version = 90; 3727 return len; 3728 } 3729 major = simple_strtoul(buf, &e, 10); 3730 if (e==buf || *e != '.') 3731 return -EINVAL; 3732 buf = e+1; 3733 minor = simple_strtoul(buf, &e, 10); 3734 if (e==buf || (*e && *e != '\n') ) 3735 return -EINVAL; 3736 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 3737 return -ENOENT; 3738 mddev->major_version = major; 3739 mddev->minor_version = minor; 3740 mddev->persistent = 1; 3741 mddev->external = 0; 3742 return len; 3743 } 3744 3745 static struct md_sysfs_entry md_metadata = 3746 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 3747 3748 static ssize_t 3749 action_show(mddev_t *mddev, char *page) 3750 { 3751 char *type = "idle"; 3752 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3753 type = "frozen"; 3754 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3755 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3756 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3757 type = "reshape"; 3758 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3759 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 3760 type = "resync"; 3761 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 3762 type = "check"; 3763 else 3764 type = "repair"; 3765 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 3766 type = "recover"; 3767 } 3768 return sprintf(page, "%s\n", type); 3769 } 3770 3771 static void reap_sync_thread(mddev_t *mddev); 3772 3773 static ssize_t 3774 action_store(mddev_t *mddev, const char *page, size_t len) 3775 { 3776 if (!mddev->pers || !mddev->pers->sync_request) 3777 return -EINVAL; 3778 3779 if (cmd_match(page, "frozen")) 3780 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3781 else 3782 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3783 3784 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 3785 if (mddev->sync_thread) { 3786 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3787 reap_sync_thread(mddev); 3788 } 3789 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3790 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 3791 return -EBUSY; 3792 else if (cmd_match(page, "resync")) 3793 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3794 else if (cmd_match(page, "recover")) { 3795 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3796 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3797 } else if (cmd_match(page, "reshape")) { 3798 int err; 3799 if (mddev->pers->start_reshape == NULL) 3800 return -EINVAL; 3801 err = mddev->pers->start_reshape(mddev); 3802 if (err) 3803 return err; 3804 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3805 } else { 3806 if (cmd_match(page, "check")) 3807 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3808 else if 
(!cmd_match(page, "repair")) 3809 return -EINVAL; 3810 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 3811 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3812 } 3813 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3814 md_wakeup_thread(mddev->thread); 3815 sysfs_notify_dirent_safe(mddev->sysfs_action); 3816 return len; 3817 } 3818 3819 static ssize_t 3820 mismatch_cnt_show(mddev_t *mddev, char *page) 3821 { 3822 return sprintf(page, "%llu\n", 3823 (unsigned long long) mddev->resync_mismatches); 3824 } 3825 3826 static struct md_sysfs_entry md_scan_mode = 3827 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 3828 3829 3830 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 3831 3832 static ssize_t 3833 sync_min_show(mddev_t *mddev, char *page) 3834 { 3835 return sprintf(page, "%d (%s)\n", speed_min(mddev), 3836 mddev->sync_speed_min ? "local": "system"); 3837 } 3838 3839 static ssize_t 3840 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 3841 { 3842 int min; 3843 char *e; 3844 if (strncmp(buf, "system", 6)==0) { 3845 mddev->sync_speed_min = 0; 3846 return len; 3847 } 3848 min = simple_strtoul(buf, &e, 10); 3849 if (buf == e || (*e && *e != '\n') || min <= 0) 3850 return -EINVAL; 3851 mddev->sync_speed_min = min; 3852 return len; 3853 } 3854 3855 static struct md_sysfs_entry md_sync_min = 3856 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 3857 3858 static ssize_t 3859 sync_max_show(mddev_t *mddev, char *page) 3860 { 3861 return sprintf(page, "%d (%s)\n", speed_max(mddev), 3862 mddev->sync_speed_max ? "local": "system"); 3863 } 3864 3865 static ssize_t 3866 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 3867 { 3868 int max; 3869 char *e; 3870 if (strncmp(buf, "system", 6)==0) { 3871 mddev->sync_speed_max = 0; 3872 return len; 3873 } 3874 max = simple_strtoul(buf, &e, 10); 3875 if (buf == e || (*e && *e != '\n') || max <= 0) 3876 return -EINVAL; 3877 mddev->sync_speed_max = max; 3878 return len; 3879 } 3880 3881 static struct md_sysfs_entry md_sync_max = 3882 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 3883 3884 static ssize_t 3885 degraded_show(mddev_t *mddev, char *page) 3886 { 3887 return sprintf(page, "%d\n", mddev->degraded); 3888 } 3889 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 3890 3891 static ssize_t 3892 sync_force_parallel_show(mddev_t *mddev, char *page) 3893 { 3894 return sprintf(page, "%d\n", mddev->parallel_resync); 3895 } 3896 3897 static ssize_t 3898 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 3899 { 3900 long n; 3901 3902 if (strict_strtol(buf, 10, &n)) 3903 return -EINVAL; 3904 3905 if (n != 0 && n != 1) 3906 return -EINVAL; 3907 3908 mddev->parallel_resync = n; 3909 3910 if (mddev->sync_thread) 3911 wake_up(&resync_wait); 3912 3913 return len; 3914 } 3915 3916 /* force parallel resync, even with shared block devices */ 3917 static struct md_sysfs_entry md_sync_force_parallel = 3918 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 3919 sync_force_parallel_show, sync_force_parallel_store); 3920 3921 static ssize_t 3922 sync_speed_show(mddev_t *mddev, char *page) 3923 { 3924 unsigned long resync, dt, db; 3925 if (mddev->curr_resync == 0) 3926 return sprintf(page, "none\n"); 3927 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 3928 dt = (jiffies - mddev->resync_mark) / HZ; 3929 if (!dt) dt++; 3930 db = resync - mddev->resync_mark_cnt; 3931 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 3932 } 
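/* Example for sync_speed_show() above: db/dt/2 is the number of
 * sectors moved since the last mark, divided by the elapsed seconds,
 * then halved to convert 512-byte sectors to KiB/sec; e.g. 20480
 * sectors in 2 seconds reads back as 5120.
 */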
3933 3934 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 3935 3936 static ssize_t 3937 sync_completed_show(mddev_t *mddev, char *page) 3938 { 3939 unsigned long long max_sectors, resync; 3940 3941 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3942 return sprintf(page, "none\n"); 3943 3944 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 3945 max_sectors = mddev->resync_max_sectors; 3946 else 3947 max_sectors = mddev->dev_sectors; 3948 3949 resync = mddev->curr_resync_completed; 3950 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 3951 } 3952 3953 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 3954 3955 static ssize_t 3956 min_sync_show(mddev_t *mddev, char *page) 3957 { 3958 return sprintf(page, "%llu\n", 3959 (unsigned long long)mddev->resync_min); 3960 } 3961 static ssize_t 3962 min_sync_store(mddev_t *mddev, const char *buf, size_t len) 3963 { 3964 unsigned long long min; 3965 if (strict_strtoull(buf, 10, &min)) 3966 return -EINVAL; 3967 if (min > mddev->resync_max) 3968 return -EINVAL; 3969 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3970 return -EBUSY; 3971 3972 /* Must be a multiple of chunk_size */ 3973 if (mddev->chunk_sectors) { 3974 sector_t temp = min; 3975 if (sector_div(temp, mddev->chunk_sectors)) 3976 return -EINVAL; 3977 } 3978 mddev->resync_min = min; 3979 3980 return len; 3981 } 3982 3983 static struct md_sysfs_entry md_min_sync = 3984 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 3985 3986 static ssize_t 3987 max_sync_show(mddev_t *mddev, char *page) 3988 { 3989 if (mddev->resync_max == MaxSector) 3990 return sprintf(page, "max\n"); 3991 else 3992 return sprintf(page, "%llu\n", 3993 (unsigned long long)mddev->resync_max); 3994 } 3995 static ssize_t 3996 max_sync_store(mddev_t *mddev, const char *buf, size_t len) 3997 { 3998 if (strncmp(buf, "max", 3) == 0) 3999 mddev->resync_max = MaxSector; 4000 else { 4001 unsigned long long max; 4002 if (strict_strtoull(buf, 10, &max)) 4003 return -EINVAL; 4004 if (max < mddev->resync_min) 4005 return -EINVAL; 4006 if (max < mddev->resync_max && 4007 mddev->ro == 0 && 4008 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4009 return -EBUSY; 4010 4011 /* Must be a multiple of chunk_size */ 4012 if (mddev->chunk_sectors) { 4013 sector_t temp = max; 4014 if (sector_div(temp, mddev->chunk_sectors)) 4015 return -EINVAL; 4016 } 4017 mddev->resync_max = max; 4018 } 4019 wake_up(&mddev->recovery_wait); 4020 return len; 4021 } 4022 4023 static struct md_sysfs_entry md_max_sync = 4024 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4025 4026 static ssize_t 4027 suspend_lo_show(mddev_t *mddev, char *page) 4028 { 4029 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4030 } 4031 4032 static ssize_t 4033 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 4034 { 4035 char *e; 4036 unsigned long long new = simple_strtoull(buf, &e, 10); 4037 unsigned long long old = mddev->suspend_lo; 4038 4039 if (mddev->pers == NULL || 4040 mddev->pers->quiesce == NULL) 4041 return -EINVAL; 4042 if (buf == e || (*e && *e != '\n')) 4043 return -EINVAL; 4044 4045 mddev->suspend_lo = new; 4046 if (new >= old) 4047 /* Shrinking suspended region */ 4048 mddev->pers->quiesce(mddev, 2); 4049 else { 4050 /* Expanding suspended region - need to wait */ 4051 mddev->pers->quiesce(mddev, 1); 4052 mddev->pers->quiesce(mddev, 0); 4053 } 4054 return len; 4055 } 4056 static struct md_sysfs_entry md_suspend_lo = 4057 
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4058 4059 4060 static ssize_t 4061 suspend_hi_show(mddev_t *mddev, char *page) 4062 { 4063 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4064 } 4065 4066 static ssize_t 4067 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 4068 { 4069 char *e; 4070 unsigned long long new = simple_strtoull(buf, &e, 10); 4071 unsigned long long old = mddev->suspend_hi; 4072 4073 if (mddev->pers == NULL || 4074 mddev->pers->quiesce == NULL) 4075 return -EINVAL; 4076 if (buf == e || (*e && *e != '\n')) 4077 return -EINVAL; 4078 4079 mddev->suspend_hi = new; 4080 if (new <= old) 4081 /* Shrinking suspended region */ 4082 mddev->pers->quiesce(mddev, 2); 4083 else { 4084 /* Expanding suspended region - need to wait */ 4085 mddev->pers->quiesce(mddev, 1); 4086 mddev->pers->quiesce(mddev, 0); 4087 } 4088 return len; 4089 } 4090 static struct md_sysfs_entry md_suspend_hi = 4091 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4092 4093 static ssize_t 4094 reshape_position_show(mddev_t *mddev, char *page) 4095 { 4096 if (mddev->reshape_position != MaxSector) 4097 return sprintf(page, "%llu\n", 4098 (unsigned long long)mddev->reshape_position); 4099 strcpy(page, "none\n"); 4100 return 5; 4101 } 4102 4103 static ssize_t 4104 reshape_position_store(mddev_t *mddev, const char *buf, size_t len) 4105 { 4106 char *e; 4107 unsigned long long new = simple_strtoull(buf, &e, 10); 4108 if (mddev->pers) 4109 return -EBUSY; 4110 if (buf == e || (*e && *e != '\n')) 4111 return -EINVAL; 4112 mddev->reshape_position = new; 4113 mddev->delta_disks = 0; 4114 mddev->new_level = mddev->level; 4115 mddev->new_layout = mddev->layout; 4116 mddev->new_chunk_sectors = mddev->chunk_sectors; 4117 return len; 4118 } 4119 4120 static struct md_sysfs_entry md_reshape_position = 4121 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4122 reshape_position_store); 4123 4124 static ssize_t 4125 array_size_show(mddev_t *mddev, char *page) 4126 { 4127 if (mddev->external_size) 4128 return sprintf(page, "%llu\n", 4129 (unsigned long long)mddev->array_sectors/2); 4130 else 4131 return sprintf(page, "default\n"); 4132 } 4133 4134 static ssize_t 4135 array_size_store(mddev_t *mddev, const char *buf, size_t len) 4136 { 4137 sector_t sectors; 4138 4139 if (strncmp(buf, "default", 7) == 0) { 4140 if (mddev->pers) 4141 sectors = mddev->pers->size(mddev, 0, 0); 4142 else 4143 sectors = mddev->array_sectors; 4144 4145 mddev->external_size = 0; 4146 } else { 4147 if (strict_blocks_to_sectors(buf, &sectors) < 0) 4148 return -EINVAL; 4149 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4150 return -E2BIG; 4151 4152 mddev->external_size = 1; 4153 } 4154 4155 mddev->array_sectors = sectors; 4156 if (mddev->pers) { 4157 set_capacity(mddev->gendisk, mddev->array_sectors); 4158 revalidate_disk(mddev->gendisk); 4159 } 4160 return len; 4161 } 4162 4163 static struct md_sysfs_entry md_array_size = 4164 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4165 array_size_store); 4166 4167 static struct attribute *md_default_attrs[] = { 4168 &md_level.attr, 4169 &md_layout.attr, 4170 &md_raid_disks.attr, 4171 &md_chunk_size.attr, 4172 &md_size.attr, 4173 &md_resync_start.attr, 4174 &md_metadata.attr, 4175 &md_new_device.attr, 4176 &md_safe_delay.attr, 4177 &md_array_state.attr, 4178 &md_reshape_position.attr, 4179 &md_array_size.attr, 4180 &max_corr_read_errors.attr, 4181 NULL, 4182 }; 4183 4184 static struct attribute
*md_redundancy_attrs[] = { 4185 &md_scan_mode.attr, 4186 &md_mismatches.attr, 4187 &md_sync_min.attr, 4188 &md_sync_max.attr, 4189 &md_sync_speed.attr, 4190 &md_sync_force_parallel.attr, 4191 &md_sync_completed.attr, 4192 &md_min_sync.attr, 4193 &md_max_sync.attr, 4194 &md_suspend_lo.attr, 4195 &md_suspend_hi.attr, 4196 &md_bitmap.attr, 4197 &md_degraded.attr, 4198 NULL, 4199 }; 4200 static struct attribute_group md_redundancy_group = { 4201 .name = NULL, 4202 .attrs = md_redundancy_attrs, 4203 }; 4204 4205 4206 static ssize_t 4207 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4208 { 4209 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4210 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4211 ssize_t rv; 4212 4213 if (!entry->show) 4214 return -EIO; 4215 rv = mddev_lock(mddev); 4216 if (!rv) { 4217 rv = entry->show(mddev, page); 4218 mddev_unlock(mddev); 4219 } 4220 return rv; 4221 } 4222 4223 static ssize_t 4224 md_attr_store(struct kobject *kobj, struct attribute *attr, 4225 const char *page, size_t length) 4226 { 4227 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4228 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4229 ssize_t rv; 4230 4231 if (!entry->store) 4232 return -EIO; 4233 if (!capable(CAP_SYS_ADMIN)) 4234 return -EACCES; 4235 rv = mddev_lock(mddev); 4236 if (mddev->hold_active == UNTIL_IOCTL) 4237 mddev->hold_active = 0; 4238 if (!rv) { 4239 rv = entry->store(mddev, page, length); 4240 mddev_unlock(mddev); 4241 } 4242 return rv; 4243 } 4244 4245 static void md_free(struct kobject *ko) 4246 { 4247 mddev_t *mddev = container_of(ko, mddev_t, kobj); 4248 4249 if (mddev->sysfs_state) 4250 sysfs_put(mddev->sysfs_state); 4251 4252 if (mddev->gendisk) { 4253 del_gendisk(mddev->gendisk); 4254 put_disk(mddev->gendisk); 4255 } 4256 if (mddev->queue) 4257 blk_cleanup_queue(mddev->queue); 4258 4259 kfree(mddev); 4260 } 4261 4262 static const struct sysfs_ops md_sysfs_ops = { 4263 .show = md_attr_show, 4264 .store = md_attr_store, 4265 }; 4266 static struct kobj_type md_ktype = { 4267 .release = md_free, 4268 .sysfs_ops = &md_sysfs_ops, 4269 .default_attrs = md_default_attrs, 4270 }; 4271 4272 int mdp_major = 0; 4273 4274 static void mddev_delayed_delete(struct work_struct *ws) 4275 { 4276 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4277 4278 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4279 kobject_del(&mddev->kobj); 4280 kobject_put(&mddev->kobj); 4281 } 4282 4283 static int md_alloc(dev_t dev, char *name) 4284 { 4285 static DEFINE_MUTEX(disks_mutex); 4286 mddev_t *mddev = mddev_find(dev); 4287 struct gendisk *disk; 4288 int partitioned; 4289 int shift; 4290 int unit; 4291 int error; 4292 4293 if (!mddev) 4294 return -ENODEV; 4295 4296 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4297 shift = partitioned ? MdpMinorShift : 0; 4298 unit = MINOR(mddev->unit) >> shift; 4299 4300 /* wait for any previous instance of this device to be 4301 * completely removed (mddev_delayed_delete). 4302 */ 4303 flush_workqueue(md_misc_wq); 4304 4305 mutex_lock(&disks_mutex); 4306 error = -EEXIST; 4307 if (mddev->gendisk) 4308 goto abort; 4309 4310 if (name) { 4311 /* Need to ensure that 'name' is not a duplicate. 
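* We walk all_mddevs under all_mddevs_lock and fail with the -EEXIST set above if any existing array's gendisk already carries this name.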
4312 */ 4313 mddev_t *mddev2; 4314 spin_lock(&all_mddevs_lock); 4315 4316 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4317 if (mddev2->gendisk && 4318 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4319 spin_unlock(&all_mddevs_lock); 4320 goto abort; 4321 } 4322 spin_unlock(&all_mddevs_lock); 4323 } 4324 4325 error = -ENOMEM; 4326 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4327 if (!mddev->queue) 4328 goto abort; 4329 mddev->queue->queuedata = mddev; 4330 4331 blk_queue_make_request(mddev->queue, md_make_request); 4332 4333 disk = alloc_disk(1 << shift); 4334 if (!disk) { 4335 blk_cleanup_queue(mddev->queue); 4336 mddev->queue = NULL; 4337 goto abort; 4338 } 4339 disk->major = MAJOR(mddev->unit); 4340 disk->first_minor = unit << shift; 4341 if (name) 4342 strcpy(disk->disk_name, name); 4343 else if (partitioned) 4344 sprintf(disk->disk_name, "md_d%d", unit); 4345 else 4346 sprintf(disk->disk_name, "md%d", unit); 4347 disk->fops = &md_fops; 4348 disk->private_data = mddev; 4349 disk->queue = mddev->queue; 4350 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); 4351 /* Allow extended partitions. This makes the 4352 * 'mdp' device redundant, but we can't really 4353 * remove it now. 4354 */ 4355 disk->flags |= GENHD_FL_EXT_DEVT; 4356 mddev->gendisk = disk; 4357 /* As soon as we call add_disk(), another thread could get 4358 * through to md_open, so make sure it doesn't get too far 4359 */ 4360 mutex_lock(&mddev->open_mutex); 4361 add_disk(disk); 4362 4363 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4364 &disk_to_dev(disk)->kobj, "%s", "md"); 4365 if (error) { 4366 /* This isn't possible, but as kobject_init_and_add is marked 4367 * __must_check, we must do something with the result 4368 */ 4369 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4370 disk->disk_name); 4371 error = 0; 4372 } 4373 if (mddev->kobj.sd && 4374 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4375 printk(KERN_DEBUG "pointless warning\n"); 4376 mutex_unlock(&mddev->open_mutex); 4377 abort: 4378 mutex_unlock(&disks_mutex); 4379 if (!error && mddev->kobj.sd) { 4380 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4381 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4382 } 4383 mddev_put(mddev); 4384 return error; 4385 } 4386 4387 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4388 { 4389 md_alloc(dev, NULL); 4390 return NULL; 4391 } 4392 4393 static int add_named_array(const char *val, struct kernel_param *kp) 4394 { 4395 /* val must be "md_*" where * is not all digits. 4396 * We allocate an array with a large free minor number, and 4397 * set the name to val. val must not already be an active name. 
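* (typically exposed as the md module's 'new_array' parameter, so e.g. "echo md_test > /sys/module/md_mod/parameters/new_array" creates an array named md_test; that parameter wiring is an assumption here, not shown above)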
4398 */ 4399 int len = strlen(val); 4400 char buf[DISK_NAME_LEN]; 4401 4402 while (len && val[len-1] == '\n') 4403 len--; 4404 if (len >= DISK_NAME_LEN) 4405 return -E2BIG; 4406 strlcpy(buf, val, len+1); 4407 if (strncmp(buf, "md_", 3) != 0) 4408 return -EINVAL; 4409 return md_alloc(0, buf); 4410 } 4411 4412 static void md_safemode_timeout(unsigned long data) 4413 { 4414 mddev_t *mddev = (mddev_t *) data; 4415 4416 if (!atomic_read(&mddev->writes_pending)) { 4417 mddev->safemode = 1; 4418 if (mddev->external) 4419 sysfs_notify_dirent_safe(mddev->sysfs_state); 4420 } 4421 md_wakeup_thread(mddev->thread); 4422 } 4423 4424 static int start_dirty_degraded; 4425 4426 int md_run(mddev_t *mddev) 4427 { 4428 int err; 4429 mdk_rdev_t *rdev; 4430 struct mdk_personality *pers; 4431 4432 if (list_empty(&mddev->disks)) 4433 /* cannot run an array with no devices.. */ 4434 return -EINVAL; 4435 4436 if (mddev->pers) 4437 return -EBUSY; 4438 /* Cannot run until previous stop completes properly */ 4439 if (mddev->sysfs_active) 4440 return -EBUSY; 4441 4442 /* 4443 * Analyze all RAID superblock(s) 4444 */ 4445 if (!mddev->raid_disks) { 4446 if (!mddev->persistent) 4447 return -EINVAL; 4448 analyze_sbs(mddev); 4449 } 4450 4451 if (mddev->level != LEVEL_NONE) 4452 request_module("md-level-%d", mddev->level); 4453 else if (mddev->clevel[0]) 4454 request_module("md-%s", mddev->clevel); 4455 4456 /* 4457 * Drop all container device buffers, from now on 4458 * the only valid external interface is through the md 4459 * device. 4460 */ 4461 list_for_each_entry(rdev, &mddev->disks, same_set) { 4462 if (test_bit(Faulty, &rdev->flags)) 4463 continue; 4464 sync_blockdev(rdev->bdev); 4465 invalidate_bdev(rdev->bdev); 4466 4467 /* perform some consistency tests on the device. 4468 * We don't want the data to overlap the metadata, 4469 * Internal Bitmap issues have been handled elsewhere. 4470 */ 4471 if (rdev->meta_bdev) { 4472 /* Nothing to check */; 4473 } else if (rdev->data_offset < rdev->sb_start) { 4474 if (mddev->dev_sectors && 4475 rdev->data_offset + mddev->dev_sectors 4476 > rdev->sb_start) { 4477 printk("md: %s: data overlaps metadata\n", 4478 mdname(mddev)); 4479 return -EINVAL; 4480 } 4481 } else { 4482 if (rdev->sb_start + rdev->sb_size/512 4483 > rdev->data_offset) { 4484 printk("md: %s: metadata overlaps data\n", 4485 mdname(mddev)); 4486 return -EINVAL; 4487 } 4488 } 4489 sysfs_notify_dirent_safe(rdev->sysfs_state); 4490 } 4491 4492 if (mddev->bio_set == NULL) 4493 mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev)); 4494 4495 spin_lock(&pers_lock); 4496 pers = find_pers(mddev->level, mddev->clevel); 4497 if (!pers || !try_module_get(pers->owner)) { 4498 spin_unlock(&pers_lock); 4499 if (mddev->level != LEVEL_NONE) 4500 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4501 mddev->level); 4502 else 4503 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4504 mddev->clevel); 4505 return -EINVAL; 4506 } 4507 mddev->pers = pers; 4508 spin_unlock(&pers_lock); 4509 if (mddev->level != pers->level) { 4510 mddev->level = pers->level; 4511 mddev->new_level = pers->level; 4512 } 4513 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4514 4515 if (mddev->reshape_position != MaxSector && 4516 pers->start_reshape == NULL) { 4517 /* This personality cannot handle reshaping... */ 4518 mddev->pers = NULL; 4519 module_put(pers->owner); 4520 return -EINVAL; 4521 } 4522 4523 if (pers->sync_request) { 4524 /* Warn if this is a potentially silly 4525 * configuration. 
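* Two rdevs whose bd_contains resolve to the same device share one physical disk, so the promised protection against a single-disk failure is illusory.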
4526 */ 4527 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4528 mdk_rdev_t *rdev2; 4529 int warned = 0; 4530 4531 list_for_each_entry(rdev, &mddev->disks, same_set) 4532 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4533 if (rdev < rdev2 && 4534 rdev->bdev->bd_contains == 4535 rdev2->bdev->bd_contains) { 4536 printk(KERN_WARNING 4537 "%s: WARNING: %s appears to be" 4538 " on the same physical disk as" 4539 " %s.\n", 4540 mdname(mddev), 4541 bdevname(rdev->bdev,b), 4542 bdevname(rdev2->bdev,b2)); 4543 warned = 1; 4544 } 4545 } 4546 4547 if (warned) 4548 printk(KERN_WARNING 4549 "True protection against single-disk" 4550 " failure might be compromised.\n"); 4551 } 4552 4553 mddev->recovery = 0; 4554 /* may be over-ridden by personality */ 4555 mddev->resync_max_sectors = mddev->dev_sectors; 4556 4557 mddev->ok_start_degraded = start_dirty_degraded; 4558 4559 if (start_readonly && mddev->ro == 0) 4560 mddev->ro = 2; /* read-only, but switch on first write */ 4561 4562 err = mddev->pers->run(mddev); 4563 if (err) 4564 printk(KERN_ERR "md: pers->run() failed ...\n"); 4565 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4566 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4567 " but 'external_size' not in effect?\n", __func__); 4568 printk(KERN_ERR 4569 "md: invalid array_size %llu > default size %llu\n", 4570 (unsigned long long)mddev->array_sectors / 2, 4571 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4572 err = -EINVAL; 4573 mddev->pers->stop(mddev); 4574 } 4575 if (err == 0 && mddev->pers->sync_request) { 4576 err = bitmap_create(mddev); 4577 if (err) { 4578 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4579 mdname(mddev), err); 4580 mddev->pers->stop(mddev); 4581 } 4582 } 4583 if (err) { 4584 module_put(mddev->pers->owner); 4585 mddev->pers = NULL; 4586 bitmap_destroy(mddev); 4587 return err; 4588 } 4589 if (mddev->pers->sync_request) { 4590 if (mddev->kobj.sd && 4591 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4592 printk(KERN_WARNING 4593 "md: cannot register extra attributes for %s\n", 4594 mdname(mddev)); 4595 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 4596 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4597 mddev->ro = 0; 4598 4599 atomic_set(&mddev->writes_pending,0); 4600 atomic_set(&mddev->max_corr_read_errors, 4601 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 4602 mddev->safemode = 0; 4603 mddev->safemode_timer.function = md_safemode_timeout; 4604 mddev->safemode_timer.data = (unsigned long) mddev; 4605 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4606 mddev->in_sync = 1; 4607 smp_wmb(); 4608 mddev->ready = 1; 4609 list_for_each_entry(rdev, &mddev->disks, same_set) 4610 if (rdev->raid_disk >= 0) { 4611 char nm[20]; 4612 sprintf(nm, "rd%d", rdev->raid_disk); 4613 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 4614 /* failure here is OK */; 4615 } 4616 4617 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4618 4619 if (mddev->flags) 4620 md_update_sb(mddev, 0); 4621 4622 md_wakeup_thread(mddev->thread); 4623 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4624 4625 md_new_event(mddev); 4626 sysfs_notify_dirent_safe(mddev->sysfs_state); 4627 sysfs_notify_dirent_safe(mddev->sysfs_action); 4628 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4629 return 0; 4630 } 4631 EXPORT_SYMBOL_GPL(md_run); 4632 4633 static int do_md_run(mddev_t *mddev) 4634 { 4635 int err; 4636 4637 err = md_run(mddev); 4638 if (err) 4639 goto out; 4640 err = 
bitmap_load(mddev); 4641 if (err) { 4642 bitmap_destroy(mddev); 4643 goto out; 4644 } 4645 set_capacity(mddev->gendisk, mddev->array_sectors); 4646 revalidate_disk(mddev->gendisk); 4647 mddev->changed = 1; 4648 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4649 out: 4650 return err; 4651 } 4652 4653 static int restart_array(mddev_t *mddev) 4654 { 4655 struct gendisk *disk = mddev->gendisk; 4656 4657 /* Complain if it has no devices */ 4658 if (list_empty(&mddev->disks)) 4659 return -ENXIO; 4660 if (!mddev->pers) 4661 return -EINVAL; 4662 if (!mddev->ro) 4663 return -EBUSY; 4664 mddev->safemode = 0; 4665 mddev->ro = 0; 4666 set_disk_ro(disk, 0); 4667 printk(KERN_INFO "md: %s switched to read-write mode.\n", 4668 mdname(mddev)); 4669 /* Kick recovery or resync if necessary */ 4670 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4671 md_wakeup_thread(mddev->thread); 4672 md_wakeup_thread(mddev->sync_thread); 4673 sysfs_notify_dirent_safe(mddev->sysfs_state); 4674 return 0; 4675 } 4676 4677 /* similar to deny_write_access, but accounts for our holding a reference 4678 * to the file ourselves */ 4679 static int deny_bitmap_write_access(struct file * file) 4680 { 4681 struct inode *inode = file->f_mapping->host; 4682 4683 spin_lock(&inode->i_lock); 4684 if (atomic_read(&inode->i_writecount) > 1) { 4685 spin_unlock(&inode->i_lock); 4686 return -ETXTBSY; 4687 } 4688 atomic_set(&inode->i_writecount, -1); 4689 spin_unlock(&inode->i_lock); 4690 4691 return 0; 4692 } 4693 4694 void restore_bitmap_write_access(struct file *file) 4695 { 4696 struct inode *inode = file->f_mapping->host; 4697 4698 spin_lock(&inode->i_lock); 4699 atomic_set(&inode->i_writecount, 1); 4700 spin_unlock(&inode->i_lock); 4701 } 4702 4703 static void md_clean(mddev_t *mddev) 4704 { 4705 mddev->array_sectors = 0; 4706 mddev->external_size = 0; 4707 mddev->dev_sectors = 0; 4708 mddev->raid_disks = 0; 4709 mddev->recovery_cp = 0; 4710 mddev->resync_min = 0; 4711 mddev->resync_max = MaxSector; 4712 mddev->reshape_position = MaxSector; 4713 mddev->external = 0; 4714 mddev->persistent = 0; 4715 mddev->level = LEVEL_NONE; 4716 mddev->clevel[0] = 0; 4717 mddev->flags = 0; 4718 mddev->ro = 0; 4719 mddev->metadata_type[0] = 0; 4720 mddev->chunk_sectors = 0; 4721 mddev->ctime = mddev->utime = 0; 4722 mddev->layout = 0; 4723 mddev->max_disks = 0; 4724 mddev->events = 0; 4725 mddev->can_decrease_events = 0; 4726 mddev->delta_disks = 0; 4727 mddev->new_level = LEVEL_NONE; 4728 mddev->new_layout = 0; 4729 mddev->new_chunk_sectors = 0; 4730 mddev->curr_resync = 0; 4731 mddev->resync_mismatches = 0; 4732 mddev->suspend_lo = mddev->suspend_hi = 0; 4733 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4734 mddev->recovery = 0; 4735 mddev->in_sync = 0; 4736 mddev->changed = 0; 4737 mddev->degraded = 0; 4738 mddev->safemode = 0; 4739 mddev->bitmap_info.offset = 0; 4740 mddev->bitmap_info.default_offset = 0; 4741 mddev->bitmap_info.chunksize = 0; 4742 mddev->bitmap_info.daemon_sleep = 0; 4743 mddev->bitmap_info.max_write_behind = 0; 4744 } 4745 4746 static void __md_stop_writes(mddev_t *mddev) 4747 { 4748 if (mddev->sync_thread) { 4749 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4750 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4751 reap_sync_thread(mddev); 4752 } 4753 4754 del_timer_sync(&mddev->safemode_timer); 4755 4756 bitmap_flush(mddev); 4757 md_super_wait(mddev); 4758 4759 if (!mddev->in_sync || mddev->flags) { 4760 /* mark array as shutdown cleanly */ 4761 mddev->in_sync = 1; 4762 md_update_sb(mddev, 1); 4763 } 4764 } 
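/* md_stop_writes() is the locked wrapper around __md_stop_writes(): it halts any resync/recovery thread, flushes the bitmap and outstanding superblock writes, and marks the array clean. */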
4765 4766 void md_stop_writes(mddev_t *mddev) 4767 { 4768 mddev_lock(mddev); 4769 __md_stop_writes(mddev); 4770 mddev_unlock(mddev); 4771 } 4772 EXPORT_SYMBOL_GPL(md_stop_writes); 4773 4774 void md_stop(mddev_t *mddev) 4775 { 4776 mddev->ready = 0; 4777 mddev->pers->stop(mddev); 4778 if (mddev->pers->sync_request && mddev->to_remove == NULL) 4779 mddev->to_remove = &md_redundancy_group; 4780 module_put(mddev->pers->owner); 4781 mddev->pers = NULL; 4782 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4783 } 4784 EXPORT_SYMBOL_GPL(md_stop); 4785 4786 static int md_set_readonly(mddev_t *mddev, int is_open) 4787 { 4788 int err = 0; 4789 mutex_lock(&mddev->open_mutex); 4790 if (atomic_read(&mddev->openers) > is_open) { 4791 printk("md: %s still in use.\n",mdname(mddev)); 4792 err = -EBUSY; 4793 goto out; 4794 } 4795 if (mddev->pers) { 4796 __md_stop_writes(mddev); 4797 4798 err = -ENXIO; 4799 if (mddev->ro==1) 4800 goto out; 4801 mddev->ro = 1; 4802 set_disk_ro(mddev->gendisk, 1); 4803 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4804 sysfs_notify_dirent_safe(mddev->sysfs_state); 4805 err = 0; 4806 } 4807 out: 4808 mutex_unlock(&mddev->open_mutex); 4809 return err; 4810 } 4811 4812 /* mode: 4813 * 0 - completely stop and disassemble array 4814 * 2 - stop but do not disassemble array 4815 */ 4816 static int do_md_stop(mddev_t * mddev, int mode, int is_open) 4817 { 4818 struct gendisk *disk = mddev->gendisk; 4819 mdk_rdev_t *rdev; 4820 4821 mutex_lock(&mddev->open_mutex); 4822 if (atomic_read(&mddev->openers) > is_open || 4823 mddev->sysfs_active) { 4824 printk("md: %s still in use.\n",mdname(mddev)); 4825 mutex_unlock(&mddev->open_mutex); 4826 return -EBUSY; 4827 } 4828 4829 if (mddev->pers) { 4830 if (mddev->ro) 4831 set_disk_ro(disk, 0); 4832 4833 __md_stop_writes(mddev); 4834 md_stop(mddev); 4835 mddev->queue->merge_bvec_fn = NULL; 4836 mddev->queue->backing_dev_info.congested_fn = NULL; 4837 4838 /* tell userspace to handle 'inactive' */ 4839 sysfs_notify_dirent_safe(mddev->sysfs_state); 4840 4841 list_for_each_entry(rdev, &mddev->disks, same_set) 4842 if (rdev->raid_disk >= 0) { 4843 char nm[20]; 4844 sprintf(nm, "rd%d", rdev->raid_disk); 4845 sysfs_remove_link(&mddev->kobj, nm); 4846 } 4847 4848 set_capacity(disk, 0); 4849 mutex_unlock(&mddev->open_mutex); 4850 mddev->changed = 1; 4851 revalidate_disk(disk); 4852 4853 if (mddev->ro) 4854 mddev->ro = 0; 4855 } else 4856 mutex_unlock(&mddev->open_mutex); 4857 /* 4858 * Free resources if final stop 4859 */ 4860 if (mode == 0) { 4861 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 4862 4863 bitmap_destroy(mddev); 4864 if (mddev->bitmap_info.file) { 4865 restore_bitmap_write_access(mddev->bitmap_info.file); 4866 fput(mddev->bitmap_info.file); 4867 mddev->bitmap_info.file = NULL; 4868 } 4869 mddev->bitmap_info.offset = 0; 4870 4871 export_array(mddev); 4872 4873 md_clean(mddev); 4874 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4875 if (mddev->hold_active == UNTIL_STOP) 4876 mddev->hold_active = 0; 4877 } 4878 blk_integrity_unregister(disk); 4879 md_new_event(mddev); 4880 sysfs_notify_dirent_safe(mddev->sysfs_state); 4881 return 0; 4882 } 4883 4884 #ifndef MODULE 4885 static void autorun_array(mddev_t *mddev) 4886 { 4887 mdk_rdev_t *rdev; 4888 int err; 4889 4890 if (list_empty(&mddev->disks)) 4891 return; 4892 4893 printk(KERN_INFO "md: running: "); 4894 4895 list_for_each_entry(rdev, &mddev->disks, same_set) { 4896 char b[BDEVNAME_SIZE]; 4897 printk("<%s>", bdevname(rdev->bdev,b)); 4898 } 4899 printk("\n"); 4900
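/* Try to start the freshly assembled array; if it fails, stop and disassemble it again. */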
4901 err = do_md_run(mddev); 4902 if (err) { 4903 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 4904 do_md_stop(mddev, 0, 0); 4905 } 4906 } 4907 4908 /* 4909 * let's try to run arrays based on all disks that have arrived 4910 * until now. (those are in pending_raid_disks) 4911 * 4912 * the method: pick the first pending disk, collect all disks with 4913 * the same UUID, remove all from the pending list and put them into 4914 * the 'same_array' list. Then order this list based on superblock 4915 * update time (freshest comes first), kick out 'old' disks and 4916 * compare superblocks. If everything's fine then run it. 4917 * 4918 * If "unit" is allocated, then bump its reference count 4919 */ 4920 static void autorun_devices(int part) 4921 { 4922 mdk_rdev_t *rdev0, *rdev, *tmp; 4923 mddev_t *mddev; 4924 char b[BDEVNAME_SIZE]; 4925 4926 printk(KERN_INFO "md: autorun ...\n"); 4927 while (!list_empty(&pending_raid_disks)) { 4928 int unit; 4929 dev_t dev; 4930 LIST_HEAD(candidates); 4931 rdev0 = list_entry(pending_raid_disks.next, 4932 mdk_rdev_t, same_set); 4933 4934 printk(KERN_INFO "md: considering %s ...\n", 4935 bdevname(rdev0->bdev,b)); 4936 INIT_LIST_HEAD(&candidates); 4937 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 4938 if (super_90_load(rdev, rdev0, 0) >= 0) { 4939 printk(KERN_INFO "md: adding %s ...\n", 4940 bdevname(rdev->bdev,b)); 4941 list_move(&rdev->same_set, &candidates); 4942 } 4943 /* 4944 * now we have a set of devices, with all of them having 4945 * mostly sane superblocks. It's time to allocate the 4946 * mddev. 4947 */ 4948 if (part) { 4949 dev = MKDEV(mdp_major, 4950 rdev0->preferred_minor << MdpMinorShift); 4951 unit = MINOR(dev) >> MdpMinorShift; 4952 } else { 4953 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 4954 unit = MINOR(dev); 4955 } 4956 if (rdev0->preferred_minor != unit) { 4957 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 4958 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 4959 break; 4960 } 4961 4962 md_probe(dev, NULL, NULL); 4963 mddev = mddev_find(dev); 4964 if (!mddev || !mddev->gendisk) { 4965 if (mddev) 4966 mddev_put(mddev); 4967 printk(KERN_ERR 4968 "md: cannot allocate memory for md drive.\n"); 4969 break; 4970 } 4971 if (mddev_lock(mddev)) 4972 printk(KERN_WARNING "md: %s locked, cannot run\n", 4973 mdname(mddev)); 4974 else if (mddev->raid_disks || mddev->major_version 4975 || !list_empty(&mddev->disks)) { 4976 printk(KERN_WARNING 4977 "md: %s already running, cannot run %s\n", 4978 mdname(mddev), bdevname(rdev0->bdev,b)); 4979 mddev_unlock(mddev); 4980 } else { 4981 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 4982 mddev->persistent = 1; 4983 rdev_for_each_list(rdev, tmp, &candidates) { 4984 list_del_init(&rdev->same_set); 4985 if (bind_rdev_to_array(rdev, mddev)) 4986 export_rdev(rdev); 4987 } 4988 autorun_array(mddev); 4989 mddev_unlock(mddev); 4990 } 4991 /* on success, candidates will be empty, on error 4992 * it won't... 4993 */ 4994 rdev_for_each_list(rdev, tmp, &candidates) { 4995 list_del_init(&rdev->same_set); 4996 export_rdev(rdev); 4997 } 4998 mddev_put(mddev); 4999 } 5000 printk(KERN_INFO "md: ...
autorun DONE.\n"); 5001 } 5002 #endif /* !MODULE */ 5003 5004 static int get_version(void __user * arg) 5005 { 5006 mdu_version_t ver; 5007 5008 ver.major = MD_MAJOR_VERSION; 5009 ver.minor = MD_MINOR_VERSION; 5010 ver.patchlevel = MD_PATCHLEVEL_VERSION; 5011 5012 if (copy_to_user(arg, &ver, sizeof(ver))) 5013 return -EFAULT; 5014 5015 return 0; 5016 } 5017 5018 static int get_array_info(mddev_t * mddev, void __user * arg) 5019 { 5020 mdu_array_info_t info; 5021 int nr,working,insync,failed,spare; 5022 mdk_rdev_t *rdev; 5023 5024 nr=working=insync=failed=spare=0; 5025 list_for_each_entry(rdev, &mddev->disks, same_set) { 5026 nr++; 5027 if (test_bit(Faulty, &rdev->flags)) 5028 failed++; 5029 else { 5030 working++; 5031 if (test_bit(In_sync, &rdev->flags)) 5032 insync++; 5033 else 5034 spare++; 5035 } 5036 } 5037 5038 info.major_version = mddev->major_version; 5039 info.minor_version = mddev->minor_version; 5040 info.patch_version = MD_PATCHLEVEL_VERSION; 5041 info.ctime = mddev->ctime; 5042 info.level = mddev->level; 5043 info.size = mddev->dev_sectors / 2; 5044 if (info.size != mddev->dev_sectors / 2) /* overflow */ 5045 info.size = -1; 5046 info.nr_disks = nr; 5047 info.raid_disks = mddev->raid_disks; 5048 info.md_minor = mddev->md_minor; 5049 info.not_persistent= !mddev->persistent; 5050 5051 info.utime = mddev->utime; 5052 info.state = 0; 5053 if (mddev->in_sync) 5054 info.state = (1<<MD_SB_CLEAN); 5055 if (mddev->bitmap && mddev->bitmap_info.offset) 5056 info.state |= (1<<MD_SB_BITMAP_PRESENT); /* don't clobber MD_SB_CLEAN */ 5057 info.active_disks = insync; 5058 info.working_disks = working; 5059 info.failed_disks = failed; 5060 info.spare_disks = spare; 5061 5062 info.layout = mddev->layout; 5063 info.chunk_size = mddev->chunk_sectors << 9; 5064 5065 if (copy_to_user(arg, &info, sizeof(info))) 5066 return -EFAULT; 5067 5068 return 0; 5069 } 5070 5071 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 5072 { 5073 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5074 char *ptr, *buf = NULL; 5075 int err = -ENOMEM; 5076 5077 if (md_allow_write(mddev)) 5078 file = kmalloc(sizeof(*file), GFP_NOIO); 5079 else 5080 file = kmalloc(sizeof(*file), GFP_KERNEL); 5081 5082 if (!file) 5083 goto out; 5084 5085 /* bitmap disabled, zero the first byte and copy out */ 5086 if (!mddev->bitmap || !mddev->bitmap->file) { 5087 file->pathname[0] = '\0'; 5088 goto copy_out; 5089 } 5090 5091 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 5092 if (!buf) 5093 goto out; 5094 5095 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 5096 if (IS_ERR(ptr)) 5097 goto out; 5098 5099 strcpy(file->pathname, ptr); 5100 5101 copy_out: 5102 err = 0; 5103 if (copy_to_user(arg, file, sizeof(*file))) 5104 err = -EFAULT; 5105 out: 5106 kfree(buf); 5107 kfree(file); 5108 return err; 5109 } 5110 5111 static int get_disk_info(mddev_t * mddev, void __user * arg) 5112 { 5113 mdu_disk_info_t info; 5114 mdk_rdev_t *rdev; 5115 5116 if (copy_from_user(&info, arg, sizeof(info))) 5117 return -EFAULT; 5118 5119 rdev = find_rdev_nr(mddev, info.number); 5120 if (rdev) { 5121 info.major = MAJOR(rdev->bdev->bd_dev); 5122 info.minor = MINOR(rdev->bdev->bd_dev); 5123 info.raid_disk = rdev->raid_disk; 5124 info.state = 0; 5125 if (test_bit(Faulty, &rdev->flags)) 5126 info.state |= (1<<MD_DISK_FAULTY); 5127 else if (test_bit(In_sync, &rdev->flags)) { 5128 info.state |= (1<<MD_DISK_ACTIVE); 5129 info.state |= (1<<MD_DISK_SYNC); 5130 } 5131 if (test_bit(WriteMostly, &rdev->flags)) 5132 info.state |= (1<<MD_DISK_WRITEMOSTLY);
5133 } else { 5134 info.major = info.minor = 0; 5135 info.raid_disk = -1; 5136 info.state = (1<<MD_DISK_REMOVED); 5137 } 5138 5139 if (copy_to_user(arg, &info, sizeof(info))) 5140 return -EFAULT; 5141 5142 return 0; 5143 } 5144 5145 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 5146 { 5147 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5148 mdk_rdev_t *rdev; 5149 dev_t dev = MKDEV(info->major,info->minor); 5150 5151 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5152 return -EOVERFLOW; 5153 5154 if (!mddev->raid_disks) { 5155 int err; 5156 /* expecting a device which has a superblock */ 5157 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5158 if (IS_ERR(rdev)) { 5159 printk(KERN_WARNING 5160 "md: md_import_device returned %ld\n", 5161 PTR_ERR(rdev)); 5162 return PTR_ERR(rdev); 5163 } 5164 if (!list_empty(&mddev->disks)) { 5165 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 5166 mdk_rdev_t, same_set); 5167 err = super_types[mddev->major_version] 5168 .load_super(rdev, rdev0, mddev->minor_version); 5169 if (err < 0) { 5170 printk(KERN_WARNING 5171 "md: %s has different UUID to %s\n", 5172 bdevname(rdev->bdev,b), 5173 bdevname(rdev0->bdev,b2)); 5174 export_rdev(rdev); 5175 return -EINVAL; 5176 } 5177 } 5178 err = bind_rdev_to_array(rdev, mddev); 5179 if (err) 5180 export_rdev(rdev); 5181 return err; 5182 } 5183 5184 /* 5185 * add_new_disk can be used once the array is assembled 5186 * to add "hot spares". They must already have a superblock 5187 * written 5188 */ 5189 if (mddev->pers) { 5190 int err; 5191 if (!mddev->pers->hot_add_disk) { 5192 printk(KERN_WARNING 5193 "%s: personality does not support diskops!\n", 5194 mdname(mddev)); 5195 return -EINVAL; 5196 } 5197 if (mddev->persistent) 5198 rdev = md_import_device(dev, mddev->major_version, 5199 mddev->minor_version); 5200 else 5201 rdev = md_import_device(dev, -1, -1); 5202 if (IS_ERR(rdev)) { 5203 printk(KERN_WARNING 5204 "md: md_import_device returned %ld\n", 5205 PTR_ERR(rdev)); 5206 return PTR_ERR(rdev); 5207 } 5208 /* set saved_raid_disk if appropriate */ 5209 if (!mddev->persistent) { 5210 if (info->state & (1<<MD_DISK_SYNC) && 5211 info->raid_disk < mddev->raid_disks) { 5212 rdev->raid_disk = info->raid_disk; 5213 set_bit(In_sync, &rdev->flags); 5214 } else 5215 rdev->raid_disk = -1; 5216 } else 5217 super_types[mddev->major_version]. 5218 validate_super(mddev, rdev); 5219 if ((info->state & (1<<MD_DISK_SYNC)) && 5220 (!test_bit(In_sync, &rdev->flags) || 5221 rdev->raid_disk != info->raid_disk)) { 5222 /* This was a hot-add request, but the event counts don't 5223 * match, so reject it. 5224 */ 5225 export_rdev(rdev); 5226 return -EINVAL; 5227 } 5228 5229 if (test_bit(In_sync, &rdev->flags)) 5230 rdev->saved_raid_disk = rdev->raid_disk; 5231 else 5232 rdev->saved_raid_disk = -1; 5233 5234 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5235 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5236 set_bit(WriteMostly, &rdev->flags); 5237 else 5238 clear_bit(WriteMostly, &rdev->flags); 5239 5240 rdev->raid_disk = -1; 5241 err = bind_rdev_to_array(rdev, mddev); 5242 if (!err && !mddev->pers->hot_remove_disk) { 5243 /* If there is hot_add_disk but no hot_remove_disk 5244 * then added disks are for geometry changes only 5245 * and should be made active immediately. 5246 */ 5247 super_types[mddev->major_version].
5248 validate_super(mddev, rdev); 5249 err = mddev->pers->hot_add_disk(mddev, rdev); 5250 if (err) 5251 unbind_rdev_from_array(rdev); 5252 } 5253 if (err) 5254 export_rdev(rdev); 5255 else 5256 sysfs_notify_dirent_safe(rdev->sysfs_state); 5257 5258 md_update_sb(mddev, 1); 5259 if (mddev->degraded) 5260 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5261 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5262 md_wakeup_thread(mddev->thread); 5263 return err; 5264 } 5265 5266 /* otherwise, add_new_disk is only allowed 5267 * for major_version==0 superblocks 5268 */ 5269 if (mddev->major_version != 0) { 5270 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5271 mdname(mddev)); 5272 return -EINVAL; 5273 } 5274 5275 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5276 int err; 5277 rdev = md_import_device(dev, -1, 0); 5278 if (IS_ERR(rdev)) { 5279 printk(KERN_WARNING 5280 "md: error, md_import_device() returned %ld\n", 5281 PTR_ERR(rdev)); 5282 return PTR_ERR(rdev); 5283 } 5284 rdev->desc_nr = info->number; 5285 if (info->raid_disk < mddev->raid_disks) 5286 rdev->raid_disk = info->raid_disk; 5287 else 5288 rdev->raid_disk = -1; 5289 5290 if (rdev->raid_disk < mddev->raid_disks) 5291 if (info->state & (1<<MD_DISK_SYNC)) 5292 set_bit(In_sync, &rdev->flags); 5293 5294 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5295 set_bit(WriteMostly, &rdev->flags); 5296 5297 if (!mddev->persistent) { 5298 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5299 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5300 } else 5301 rdev->sb_start = calc_dev_sboffset(rdev); 5302 rdev->sectors = rdev->sb_start; 5303 5304 err = bind_rdev_to_array(rdev, mddev); 5305 if (err) { 5306 export_rdev(rdev); 5307 return err; 5308 } 5309 } 5310 5311 return 0; 5312 } 5313 5314 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 5315 { 5316 char b[BDEVNAME_SIZE]; 5317 mdk_rdev_t *rdev; 5318 5319 rdev = find_rdev(mddev, dev); 5320 if (!rdev) 5321 return -ENXIO; 5322 5323 if (rdev->raid_disk >= 0) 5324 goto busy; 5325 5326 kick_rdev_from_array(rdev); 5327 md_update_sb(mddev, 1); 5328 md_new_event(mddev); 5329 5330 return 0; 5331 busy: 5332 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 5333 bdevname(rdev->bdev,b), mdname(mddev)); 5334 return -EBUSY; 5335 } 5336 5337 static int hot_add_disk(mddev_t * mddev, dev_t dev) 5338 { 5339 char b[BDEVNAME_SIZE]; 5340 int err; 5341 mdk_rdev_t *rdev; 5342 5343 if (!mddev->pers) 5344 return -ENODEV; 5345 5346 if (mddev->major_version != 0) { 5347 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 5348 " version-0 superblocks.\n", 5349 mdname(mddev)); 5350 return -EINVAL; 5351 } 5352 if (!mddev->pers->hot_add_disk) { 5353 printk(KERN_WARNING 5354 "%s: personality does not support diskops!\n", 5355 mdname(mddev)); 5356 return -EINVAL; 5357 } 5358 5359 rdev = md_import_device(dev, -1, 0); 5360 if (IS_ERR(rdev)) { 5361 printk(KERN_WARNING 5362 "md: error, md_import_device() returned %ld\n", 5363 PTR_ERR(rdev)); 5364 return -EINVAL; 5365 } 5366 5367 if (mddev->persistent) 5368 rdev->sb_start = calc_dev_sboffset(rdev); 5369 else 5370 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5371 5372 rdev->sectors = rdev->sb_start; 5373 5374 if (test_bit(Faulty, &rdev->flags)) { 5375 printk(KERN_WARNING 5376 "md: can not hot-add faulty %s disk to %s!\n", 5377 bdevname(rdev->bdev,b), mdname(mddev)); 5378 err = -EINVAL; 5379 goto abort_export; 5380 } 5381 clear_bit(In_sync, &rdev->flags); 5382 rdev->desc_nr = -1; 5383 rdev->saved_raid_disk = -1; 5384 err = 
bind_rdev_to_array(rdev, mddev); 5385 if (err) 5386 goto abort_export; 5387 5388 /* 5389 * The rest had better be atomic; we can have disk failures 5390 * noticed in interrupt contexts ... 5391 */ 5392 5393 rdev->raid_disk = -1; 5394 5395 md_update_sb(mddev, 1); 5396 5397 /* 5398 * Kick recovery, maybe this spare has to be added to the 5399 * array immediately. 5400 */ 5401 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5402 md_wakeup_thread(mddev->thread); 5403 md_new_event(mddev); 5404 return 0; 5405 5406 abort_export: 5407 export_rdev(rdev); 5408 return err; 5409 } 5410 5411 static int set_bitmap_file(mddev_t *mddev, int fd) 5412 { 5413 int err; 5414 5415 if (mddev->pers) { 5416 if (!mddev->pers->quiesce) 5417 return -EBUSY; 5418 if (mddev->recovery || mddev->sync_thread) 5419 return -EBUSY; 5420 /* we should be able to change the bitmap.. */ 5421 } 5422 5423 5424 if (fd >= 0) { 5425 if (mddev->bitmap) 5426 return -EEXIST; /* cannot add when bitmap is present */ 5427 mddev->bitmap_info.file = fget(fd); 5428 5429 if (mddev->bitmap_info.file == NULL) { 5430 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 5431 mdname(mddev)); 5432 return -EBADF; 5433 } 5434 5435 err = deny_bitmap_write_access(mddev->bitmap_info.file); 5436 if (err) { 5437 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5438 mdname(mddev)); 5439 fput(mddev->bitmap_info.file); 5440 mddev->bitmap_info.file = NULL; 5441 return err; 5442 } 5443 mddev->bitmap_info.offset = 0; /* file overrides offset */ 5444 } else if (mddev->bitmap == NULL) 5445 return -ENOENT; /* cannot remove what isn't there */ 5446 err = 0; 5447 if (mddev->pers) { 5448 mddev->pers->quiesce(mddev, 1); 5449 if (fd >= 0) { 5450 err = bitmap_create(mddev); 5451 if (!err) 5452 err = bitmap_load(mddev); 5453 } 5454 if (fd < 0 || err) { 5455 bitmap_destroy(mddev); 5456 fd = -1; /* make sure to put the file */ 5457 } 5458 mddev->pers->quiesce(mddev, 0); 5459 } 5460 if (fd < 0) { 5461 if (mddev->bitmap_info.file) { 5462 restore_bitmap_write_access(mddev->bitmap_info.file); 5463 fput(mddev->bitmap_info.file); 5464 } 5465 mddev->bitmap_info.file = NULL; 5466 } 5467 5468 return err; 5469 } 5470 5471 /* 5472 * set_array_info is used in two different ways. 5473 * The original usage is when creating a new array. 5474 * In this usage, raid_disks is > 0 and, together with 5475 * level, size, not_persistent, layout and chunksize, determines the 5476 * shape of the array. 5477 * This will always create an array with a type-0.90.0 superblock. 5478 * The newer usage is when assembling an array. 5479 * In this case raid_disks will be 0, and the major_version field is 5480 * used to determine which style of superblock is to be found on the devices. 5481 * The minor and patch _version numbers are also kept in case the 5482 * super_block handler wishes to interpret them. 5483 */ 5484 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 5485 { 5486 5487 if (info->raid_disks == 0) { 5488 /* just setting version number for superblock loading */ 5489 if (info->major_version < 0 || 5490 info->major_version >= ARRAY_SIZE(super_types) || 5491 super_types[info->major_version].name == NULL) { 5492 /* maybe try to auto-load a module?
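* (no autoloading is attempted; we just report the version as unknown and fail with -EINVAL below)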
*/ 5493 printk(KERN_INFO 5494 "md: superblock version %d not known\n", 5495 info->major_version); 5496 return -EINVAL; 5497 } 5498 mddev->major_version = info->major_version; 5499 mddev->minor_version = info->minor_version; 5500 mddev->patch_version = info->patch_version; 5501 mddev->persistent = !info->not_persistent; 5502 /* ensure mddev_put doesn't delete this now that there 5503 * is some minimal configuration. 5504 */ 5505 mddev->ctime = get_seconds(); 5506 return 0; 5507 } 5508 mddev->major_version = MD_MAJOR_VERSION; 5509 mddev->minor_version = MD_MINOR_VERSION; 5510 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5511 mddev->ctime = get_seconds(); 5512 5513 mddev->level = info->level; 5514 mddev->clevel[0] = 0; 5515 mddev->dev_sectors = 2 * (sector_t)info->size; 5516 mddev->raid_disks = info->raid_disks; 5517 /* don't set md_minor, it is determined by which /dev/md* was 5518 * opened 5519 */ 5520 if (info->state & (1<<MD_SB_CLEAN)) 5521 mddev->recovery_cp = MaxSector; 5522 else 5523 mddev->recovery_cp = 0; 5524 mddev->persistent = ! info->not_persistent; 5525 mddev->external = 0; 5526 5527 mddev->layout = info->layout; 5528 mddev->chunk_sectors = info->chunk_size >> 9; 5529 5530 mddev->max_disks = MD_SB_DISKS; 5531 5532 if (mddev->persistent) 5533 mddev->flags = 0; 5534 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5535 5536 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 5537 mddev->bitmap_info.offset = 0; 5538 5539 mddev->reshape_position = MaxSector; 5540 5541 /* 5542 * Generate a 128 bit UUID 5543 */ 5544 get_random_bytes(mddev->uuid, 16); 5545 5546 mddev->new_level = mddev->level; 5547 mddev->new_chunk_sectors = mddev->chunk_sectors; 5548 mddev->new_layout = mddev->layout; 5549 mddev->delta_disks = 0; 5550 5551 return 0; 5552 } 5553 5554 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) 5555 { 5556 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5557 5558 if (mddev->external_size) 5559 return; 5560 5561 mddev->array_sectors = array_sectors; 5562 } 5563 EXPORT_SYMBOL(md_set_array_sectors); 5564 5565 static int update_size(mddev_t *mddev, sector_t num_sectors) 5566 { 5567 mdk_rdev_t *rdev; 5568 int rv; 5569 int fit = (num_sectors == 0); 5570 5571 if (mddev->pers->resize == NULL) 5572 return -EINVAL; 5573 /* The "num_sectors" is the number of sectors of each device that 5574 * is used. This can only make sense for arrays with redundancy. 5575 * linear and raid0 always use whatever space is available. We can only 5576 * consider changing this number if no resync or reconstruction is 5577 * happening, and if the new size is acceptable. It must fit before the 5578 * sb_start or, if that is <data_offset, it must fit before the size 5579 * of each device. If num_sectors is zero, we find the largest size 5580 * that fits. 5581 */ 5582 if (mddev->sync_thread) 5583 return -EBUSY; 5584 if (mddev->bitmap) 5585 /* Sorry, cannot grow a bitmap yet, just remove it, 5586 * grow, and re-add.
5587 */ 5588 return -EBUSY; 5589 list_for_each_entry(rdev, &mddev->disks, same_set) { 5590 sector_t avail = rdev->sectors; 5591 5592 if (fit && (num_sectors == 0 || num_sectors > avail)) 5593 num_sectors = avail; 5594 if (avail < num_sectors) 5595 return -ENOSPC; 5596 } 5597 rv = mddev->pers->resize(mddev, num_sectors); 5598 if (!rv) 5599 revalidate_disk(mddev->gendisk); 5600 return rv; 5601 } 5602 5603 static int update_raid_disks(mddev_t *mddev, int raid_disks) 5604 { 5605 int rv; 5606 /* change the number of raid disks */ 5607 if (mddev->pers->check_reshape == NULL) 5608 return -EINVAL; 5609 if (raid_disks <= 0 || 5610 (mddev->max_disks && raid_disks >= mddev->max_disks)) 5611 return -EINVAL; 5612 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5613 return -EBUSY; 5614 mddev->delta_disks = raid_disks - mddev->raid_disks; 5615 5616 rv = mddev->pers->check_reshape(mddev); 5617 if (rv < 0) 5618 mddev->delta_disks = 0; 5619 return rv; 5620 } 5621 5622 5623 /* 5624 * update_array_info is used to change the configuration of an 5625 * on-line array. 5626 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 5627 * fields in the info are checked against the array. 5628 * Any differences that cannot be handled will cause an error. 5629 * Normally, only one change can be managed at a time. 5630 */ 5631 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 5632 { 5633 int rv = 0; 5634 int cnt = 0; 5635 int state = 0; 5636 5637 /* calculate expected state,ignoring low bits */ 5638 if (mddev->bitmap && mddev->bitmap_info.offset) 5639 state |= (1 << MD_SB_BITMAP_PRESENT); 5640 5641 if (mddev->major_version != info->major_version || 5642 mddev->minor_version != info->minor_version || 5643 /* mddev->patch_version != info->patch_version || */ 5644 mddev->ctime != info->ctime || 5645 mddev->level != info->level || 5646 /* mddev->layout != info->layout || */ 5647 !mddev->persistent != info->not_persistent|| 5648 mddev->chunk_sectors != info->chunk_size >> 9 || 5649 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5650 ((state^info->state) & 0xfffffe00) 5651 ) 5652 return -EINVAL; 5653 /* Check there is only one change */ 5654 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5655 cnt++; 5656 if (mddev->raid_disks != info->raid_disks) 5657 cnt++; 5658 if (mddev->layout != info->layout) 5659 cnt++; 5660 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5661 cnt++; 5662 if (cnt == 0) 5663 return 0; 5664 if (cnt > 1) 5665 return -EINVAL; 5666 5667 if (mddev->layout != info->layout) { 5668 /* Change layout 5669 * we don't need to do anything at the md level, the 5670 * personality will take care of it all. 
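* (new_layout is staged below and the personality's check_reshape() either applies it or it is rolled back on error)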
5671 */ 5672 if (mddev->pers->check_reshape == NULL) 5673 return -EINVAL; 5674 else { 5675 mddev->new_layout = info->layout; 5676 rv = mddev->pers->check_reshape(mddev); 5677 if (rv) 5678 mddev->new_layout = mddev->layout; 5679 return rv; 5680 } 5681 } 5682 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5683 rv = update_size(mddev, (sector_t)info->size * 2); 5684 5685 if (mddev->raid_disks != info->raid_disks) 5686 rv = update_raid_disks(mddev, info->raid_disks); 5687 5688 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 5689 if (mddev->pers->quiesce == NULL) 5690 return -EINVAL; 5691 if (mddev->recovery || mddev->sync_thread) 5692 return -EBUSY; 5693 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 5694 /* add the bitmap */ 5695 if (mddev->bitmap) 5696 return -EEXIST; 5697 if (mddev->bitmap_info.default_offset == 0) 5698 return -EINVAL; 5699 mddev->bitmap_info.offset = 5700 mddev->bitmap_info.default_offset; 5701 mddev->pers->quiesce(mddev, 1); 5702 rv = bitmap_create(mddev); 5703 if (!rv) 5704 rv = bitmap_load(mddev); 5705 if (rv) 5706 bitmap_destroy(mddev); 5707 mddev->pers->quiesce(mddev, 0); 5708 } else { 5709 /* remove the bitmap */ 5710 if (!mddev->bitmap) 5711 return -ENOENT; 5712 if (mddev->bitmap->file) 5713 return -EINVAL; 5714 mddev->pers->quiesce(mddev, 1); 5715 bitmap_destroy(mddev); 5716 mddev->pers->quiesce(mddev, 0); 5717 mddev->bitmap_info.offset = 0; 5718 } 5719 } 5720 md_update_sb(mddev, 1); 5721 return rv; 5722 } 5723 5724 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 5725 { 5726 mdk_rdev_t *rdev; 5727 5728 if (mddev->pers == NULL) 5729 return -ENODEV; 5730 5731 rdev = find_rdev(mddev, dev); 5732 if (!rdev) 5733 return -ENODEV; 5734 5735 md_error(mddev, rdev); 5736 return 0; 5737 } 5738 5739 /* 5740 * We have a problem here: there is no easy way to give a CHS 5741 * virtual geometry. We currently pretend that we have 2 heads, 5742 * 4 sectors (with a BIG number of cylinders...). This drives 5743 * dosfs just mad...
;-) 5744 */ 5745 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 5746 { 5747 mddev_t *mddev = bdev->bd_disk->private_data; 5748 5749 geo->heads = 2; 5750 geo->sectors = 4; 5751 geo->cylinders = mddev->array_sectors / 8; 5752 return 0; 5753 } 5754 5755 static int md_ioctl(struct block_device *bdev, fmode_t mode, 5756 unsigned int cmd, unsigned long arg) 5757 { 5758 int err = 0; 5759 void __user *argp = (void __user *)arg; 5760 mddev_t *mddev = NULL; 5761 int ro; 5762 5763 if (!capable(CAP_SYS_ADMIN)) 5764 return -EACCES; 5765 5766 /* 5767 * Commands dealing with the RAID driver but not any 5768 * particular array: 5769 */ 5770 switch (cmd) 5771 { 5772 case RAID_VERSION: 5773 err = get_version(argp); 5774 goto done; 5775 5776 case PRINT_RAID_DEBUG: 5777 err = 0; 5778 md_print_devices(); 5779 goto done; 5780 5781 #ifndef MODULE 5782 case RAID_AUTORUN: 5783 err = 0; 5784 autostart_arrays(arg); 5785 goto done; 5786 #endif 5787 default:; 5788 } 5789 5790 /* 5791 * Commands creating/starting a new array: 5792 */ 5793 5794 mddev = bdev->bd_disk->private_data; 5795 5796 if (!mddev) { 5797 BUG(); 5798 goto abort; 5799 } 5800 5801 err = mddev_lock(mddev); 5802 if (err) { 5803 printk(KERN_INFO 5804 "md: ioctl lock interrupted, reason %d, cmd %d\n", 5805 err, cmd); 5806 goto abort; 5807 } 5808 5809 switch (cmd) 5810 { 5811 case SET_ARRAY_INFO: 5812 { 5813 mdu_array_info_t info; 5814 if (!arg) 5815 memset(&info, 0, sizeof(info)); 5816 else if (copy_from_user(&info, argp, sizeof(info))) { 5817 err = -EFAULT; 5818 goto abort_unlock; 5819 } 5820 if (mddev->pers) { 5821 err = update_array_info(mddev, &info); 5822 if (err) { 5823 printk(KERN_WARNING "md: couldn't update" 5824 " array info. %d\n", err); 5825 goto abort_unlock; 5826 } 5827 goto done_unlock; 5828 } 5829 if (!list_empty(&mddev->disks)) { 5830 printk(KERN_WARNING 5831 "md: array %s already has disks!\n", 5832 mdname(mddev)); 5833 err = -EBUSY; 5834 goto abort_unlock; 5835 } 5836 if (mddev->raid_disks) { 5837 printk(KERN_WARNING 5838 "md: array %s already initialised!\n", 5839 mdname(mddev)); 5840 err = -EBUSY; 5841 goto abort_unlock; 5842 } 5843 err = set_array_info(mddev, &info); 5844 if (err) { 5845 printk(KERN_WARNING "md: couldn't set" 5846 " array info. 
%d\n", err); 5847 goto abort_unlock; 5848 } 5849 } 5850 goto done_unlock; 5851 5852 default:; 5853 } 5854 5855 /* 5856 * Commands querying/configuring an existing array: 5857 */ 5858 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 5859 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 5860 if ((!mddev->raid_disks && !mddev->external) 5861 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 5862 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 5863 && cmd != GET_BITMAP_FILE) { 5864 err = -ENODEV; 5865 goto abort_unlock; 5866 } 5867 5868 /* 5869 * Commands even a read-only array can execute: 5870 */ 5871 switch (cmd) 5872 { 5873 case GET_ARRAY_INFO: 5874 err = get_array_info(mddev, argp); 5875 goto done_unlock; 5876 5877 case GET_BITMAP_FILE: 5878 err = get_bitmap_file(mddev, argp); 5879 goto done_unlock; 5880 5881 case GET_DISK_INFO: 5882 err = get_disk_info(mddev, argp); 5883 goto done_unlock; 5884 5885 case RESTART_ARRAY_RW: 5886 err = restart_array(mddev); 5887 goto done_unlock; 5888 5889 case STOP_ARRAY: 5890 err = do_md_stop(mddev, 0, 1); 5891 goto done_unlock; 5892 5893 case STOP_ARRAY_RO: 5894 err = md_set_readonly(mddev, 1); 5895 goto done_unlock; 5896 5897 case BLKROSET: 5898 if (get_user(ro, (int __user *)(arg))) { 5899 err = -EFAULT; 5900 goto done_unlock; 5901 } 5902 err = -EINVAL; 5903 5904 /* if the bdev is going readonly the value of mddev->ro 5905 * does not matter, no writes are coming 5906 */ 5907 if (ro) 5908 goto done_unlock; 5909 5910 /* are we are already prepared for writes? */ 5911 if (mddev->ro != 1) 5912 goto done_unlock; 5913 5914 /* transitioning to readauto need only happen for 5915 * arrays that call md_write_start 5916 */ 5917 if (mddev->pers) { 5918 err = restart_array(mddev); 5919 if (err == 0) { 5920 mddev->ro = 2; 5921 set_disk_ro(mddev->gendisk, 0); 5922 } 5923 } 5924 goto done_unlock; 5925 } 5926 5927 /* 5928 * The remaining ioctls are changing the state of the 5929 * superblock, so we do not allow them on read-only arrays. 5930 * However non-MD ioctls (e.g. get-size) will still come through 5931 * here and hit the 'default' below, so only disallow 5932 * 'md' ioctls, and switch to rw mode if started auto-readonly. 
5933 */ 5934 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 5935 if (mddev->ro == 2) { 5936 mddev->ro = 0; 5937 sysfs_notify_dirent_safe(mddev->sysfs_state); 5938 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5939 md_wakeup_thread(mddev->thread); 5940 } else { 5941 err = -EROFS; 5942 goto abort_unlock; 5943 } 5944 } 5945 5946 switch (cmd) 5947 { 5948 case ADD_NEW_DISK: 5949 { 5950 mdu_disk_info_t info; 5951 if (copy_from_user(&info, argp, sizeof(info))) 5952 err = -EFAULT; 5953 else 5954 err = add_new_disk(mddev, &info); 5955 goto done_unlock; 5956 } 5957 5958 case HOT_REMOVE_DISK: 5959 err = hot_remove_disk(mddev, new_decode_dev(arg)); 5960 goto done_unlock; 5961 5962 case HOT_ADD_DISK: 5963 err = hot_add_disk(mddev, new_decode_dev(arg)); 5964 goto done_unlock; 5965 5966 case SET_DISK_FAULTY: 5967 err = set_disk_faulty(mddev, new_decode_dev(arg)); 5968 goto done_unlock; 5969 5970 case RUN_ARRAY: 5971 err = do_md_run(mddev); 5972 goto done_unlock; 5973 5974 case SET_BITMAP_FILE: 5975 err = set_bitmap_file(mddev, (int)arg); 5976 goto done_unlock; 5977 5978 default: 5979 err = -EINVAL; 5980 goto abort_unlock; 5981 } 5982 5983 done_unlock: 5984 abort_unlock: 5985 if (mddev->hold_active == UNTIL_IOCTL && 5986 err != -EINVAL) 5987 mddev->hold_active = 0; 5988 mddev_unlock(mddev); 5989 5990 return err; 5991 done: 5992 if (err) 5993 MD_BUG(); 5994 abort: 5995 return err; 5996 } 5997 #ifdef CONFIG_COMPAT 5998 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 5999 unsigned int cmd, unsigned long arg) 6000 { 6001 switch (cmd) { 6002 case HOT_REMOVE_DISK: 6003 case HOT_ADD_DISK: 6004 case SET_DISK_FAULTY: 6005 case SET_BITMAP_FILE: 6006 /* These take in integer arg, do not convert */ 6007 break; 6008 default: 6009 arg = (unsigned long)compat_ptr(arg); 6010 break; 6011 } 6012 6013 return md_ioctl(bdev, mode, cmd, arg); 6014 } 6015 #endif /* CONFIG_COMPAT */ 6016 6017 static int md_open(struct block_device *bdev, fmode_t mode) 6018 { 6019 /* 6020 * Succeed if we can lock the mddev, which confirms that 6021 * it isn't being stopped right now. 6022 */ 6023 mddev_t *mddev = mddev_find(bdev->bd_dev); 6024 int err; 6025 6026 if (mddev->gendisk != bdev->bd_disk) { 6027 /* we are racing with mddev_put which is discarding this 6028 * bd_disk. 
*/ 6030 mddev_put(mddev); 6031 /* Wait until bdev->bd_disk is definitely gone */ 6032 flush_workqueue(md_misc_wq); 6033 /* Then retry the open from the top */ 6034 return -ERESTARTSYS; 6035 } 6036 BUG_ON(mddev != bdev->bd_disk->private_data); 6037 6038 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 6039 goto out; 6040 6041 err = 0; 6042 atomic_inc(&mddev->openers); 6043 mutex_unlock(&mddev->open_mutex); 6044 6045 check_disk_change(bdev); 6046 out: 6047 return err; 6048 } 6049 6050 static int md_release(struct gendisk *disk, fmode_t mode) 6051 { 6052 mddev_t *mddev = disk->private_data; 6053 6054 BUG_ON(!mddev); 6055 atomic_dec(&mddev->openers); 6056 mddev_put(mddev); 6057 6058 return 0; 6059 } 6060 6061 static int md_media_changed(struct gendisk *disk) 6062 { 6063 mddev_t *mddev = disk->private_data; 6064 6065 return mddev->changed; 6066 } 6067 6068 static int md_revalidate(struct gendisk *disk) 6069 { 6070 mddev_t *mddev = disk->private_data; 6071 6072 mddev->changed = 0; 6073 return 0; 6074 } 6075 static const struct block_device_operations md_fops = 6076 { 6077 .owner = THIS_MODULE, 6078 .open = md_open, 6079 .release = md_release, 6080 .ioctl = md_ioctl, 6081 #ifdef CONFIG_COMPAT 6082 .compat_ioctl = md_compat_ioctl, 6083 #endif 6084 .getgeo = md_getgeo, 6085 .media_changed = md_media_changed, 6086 .revalidate_disk= md_revalidate, 6087 }; 6088 6089 static int md_thread(void * arg) 6090 { 6091 mdk_thread_t *thread = arg; 6092 6093 /* 6094 * md_thread is a 'system-thread', its priority should be very 6095 * high. We avoid resource deadlocks individually in each 6096 * raid personality. (RAID5 does preallocation) We also use RR and 6097 * the very same RT priority as kswapd, thus we will never get 6098 * into a priority inversion deadlock. 6099 * 6100 * we definitely have to have equal or higher priority than 6101 * bdflush, otherwise bdflush will deadlock if there are too 6102 * many dirty RAID5 blocks. 6103 */ 6104 6105 allow_signal(SIGKILL); 6106 while (!kthread_should_stop()) { 6107 6108 /* We need to wait INTERRUPTIBLE so that 6109 * we don't add to the load-average.
6110 * That means we need to be sure no signals are 6111 * pending 6112 */ 6113 if (signal_pending(current)) 6114 flush_signals(current); 6115 6116 wait_event_interruptible_timeout 6117 (thread->wqueue, 6118 test_bit(THREAD_WAKEUP, &thread->flags) 6119 || kthread_should_stop(), 6120 thread->timeout); 6121 6122 clear_bit(THREAD_WAKEUP, &thread->flags); 6123 if (!kthread_should_stop()) 6124 thread->run(thread->mddev); 6125 } 6126 6127 return 0; 6128 } 6129 6130 void md_wakeup_thread(mdk_thread_t *thread) 6131 { 6132 if (thread) { 6133 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 6134 set_bit(THREAD_WAKEUP, &thread->flags); 6135 wake_up(&thread->wqueue); 6136 } 6137 } 6138 6139 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 6140 const char *name) 6141 { 6142 mdk_thread_t *thread; 6143 6144 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 6145 if (!thread) 6146 return NULL; 6147 6148 init_waitqueue_head(&thread->wqueue); 6149 6150 thread->run = run; 6151 thread->mddev = mddev; 6152 thread->timeout = MAX_SCHEDULE_TIMEOUT; 6153 thread->tsk = kthread_run(md_thread, thread, 6154 "%s_%s", 6155 mdname(thread->mddev), 6156 name ?: mddev->pers->name); 6157 if (IS_ERR(thread->tsk)) { 6158 kfree(thread); 6159 return NULL; 6160 } 6161 return thread; 6162 } 6163 6164 void md_unregister_thread(mdk_thread_t *thread) 6165 { 6166 if (!thread) 6167 return; 6168 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 6169 6170 kthread_stop(thread->tsk); 6171 kfree(thread); 6172 } 6173 6174 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 6175 { 6176 if (!mddev) { 6177 MD_BUG(); 6178 return; 6179 } 6180 6181 if (!rdev || test_bit(Faulty, &rdev->flags)) 6182 return; 6183 6184 if (mddev->external) 6185 set_bit(Blocked, &rdev->flags); 6186 /* 6187 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 6188 mdname(mddev), 6189 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 6190 __builtin_return_address(0),__builtin_return_address(1), 6191 __builtin_return_address(2),__builtin_return_address(3)); 6192 */ 6193 if (!mddev->pers) 6194 return; 6195 if (!mddev->pers->error_handler) 6196 return; 6197 mddev->pers->error_handler(mddev,rdev); 6198 if (mddev->degraded) 6199 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6200 sysfs_notify_dirent_safe(rdev->sysfs_state); 6201 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6202 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6203 md_wakeup_thread(mddev->thread); 6204 if (mddev->event_work.func) 6205 queue_work(md_misc_wq, &mddev->event_work); 6206 md_new_event_inintr(mddev); 6207 } 6208 6209 /* seq_file implementation /proc/mdstat */ 6210 6211 static void status_unused(struct seq_file *seq) 6212 { 6213 int i = 0; 6214 mdk_rdev_t *rdev; 6215 6216 seq_printf(seq, "unused devices: "); 6217 6218 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 6219 char b[BDEVNAME_SIZE]; 6220 i++; 6221 seq_printf(seq, "%s ", 6222 bdevname(rdev->bdev,b)); 6223 } 6224 if (!i) 6225 seq_printf(seq, "<none>"); 6226 6227 seq_printf(seq, "\n"); 6228 } 6229 6230 6231 static void status_resync(struct seq_file *seq, mddev_t * mddev) 6232 { 6233 sector_t max_sectors, resync, res; 6234 unsigned long dt, db; 6235 sector_t rt; 6236 int scale; 6237 unsigned int per_milli; 6238 6239 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 6240 6241 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 6242 max_sectors = mddev->resync_max_sectors; 6243 else 6244 max_sectors = mddev->dev_sectors; 6245 6246 /* 6247 * Should not 
happen. 6248 */ 6249 if (!max_sectors) { 6250 MD_BUG(); 6251 return; 6252 } 6253 /* Pick 'scale' such that (resync>>scale)*1000 will fit 6254 * in a sector_t, and (max_sectors>>scale) will fit in a 6255 * u32, as those are the requirements for sector_div. 6256 * Thus 'scale' must be at least 10 6257 */ 6258 scale = 10; 6259 if (sizeof(sector_t) > sizeof(unsigned long)) { 6260 while ( max_sectors/2 > (1ULL<<(scale+32))) 6261 scale++; 6262 } 6263 res = (resync>>scale)*1000; 6264 sector_div(res, (u32)((max_sectors>>scale)+1)); 6265 6266 per_milli = res; 6267 { 6268 int i, x = per_milli/50, y = 20-x; 6269 seq_printf(seq, "["); 6270 for (i = 0; i < x; i++) 6271 seq_printf(seq, "="); 6272 seq_printf(seq, ">"); 6273 for (i = 0; i < y; i++) 6274 seq_printf(seq, "."); 6275 seq_printf(seq, "] "); 6276 } 6277 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 6278 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 6279 "reshape" : 6280 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 6281 "check" : 6282 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 6283 "resync" : "recovery"))), 6284 per_milli/10, per_milli % 10, 6285 (unsigned long long) resync/2, 6286 (unsigned long long) max_sectors/2); 6287 6288 /* 6289 * dt: time from mark until now 6290 * db: blocks written from mark until now 6291 * rt: remaining time 6292 * 6293 * rt is a sector_t, so could be 32bit or 64bit. 6294 * So we divide before multiply in case it is 32bit and close 6295 * to the limit. 6296 * We scale the divisor (db) by 32 to avoid losing precision 6297 * near the end of resync when the number of remaining sectors 6298 * is close to 'db'. 6299 * We then divide rt by 32 after multiplying by db to compensate. 6300 * The '+1' avoids division by zero if db is very small. 6301 */ 6302 dt = ((jiffies - mddev->resync_mark) / HZ); 6303 if (!dt) dt++; 6304 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 6305 - mddev->resync_mark_cnt; 6306 6307 rt = max_sectors - resync; /* number of remaining sectors */ 6308 sector_div(rt, db/32+1); 6309 rt *= dt; 6310 rt >>= 5; 6311 6312 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 6313 ((unsigned long)rt % 60)/6); 6314 6315 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 6316 } 6317 6318 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 6319 { 6320 struct list_head *tmp; 6321 loff_t l = *pos; 6322 mddev_t *mddev; 6323 6324 if (l >= 0x10000) 6325 return NULL; 6326 if (!l--) 6327 /* header */ 6328 return (void*)1; 6329 6330 spin_lock(&all_mddevs_lock); 6331 list_for_each(tmp,&all_mddevs) 6332 if (!l--) { 6333 mddev = list_entry(tmp, mddev_t, all_mddevs); 6334 mddev_get(mddev); 6335 spin_unlock(&all_mddevs_lock); 6336 return mddev; 6337 } 6338 spin_unlock(&all_mddevs_lock); 6339 if (!l--) 6340 return (void*)2;/* tail */ 6341 return NULL; 6342 } 6343 6344 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 6345 { 6346 struct list_head *tmp; 6347 mddev_t *next_mddev, *mddev = v; 6348 6349 ++*pos; 6350 if (v == (void*)2) 6351 return NULL; 6352 6353 spin_lock(&all_mddevs_lock); 6354 if (v == (void*)1) 6355 tmp = all_mddevs.next; 6356 else 6357 tmp = mddev->all_mddevs.next; 6358 if (tmp != &all_mddevs) 6359 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 6360 else { 6361 next_mddev = (void*)2; 6362 *pos = 0x10000; 6363 } 6364 spin_unlock(&all_mddevs_lock); 6365 6366 if (v != (void*)1) 6367 mddev_put(mddev); 6368 return next_mddev; 6369 6370 } 6371 6372 static void md_seq_stop(struct seq_file *seq, void *v) 6373 { 6374 mddev_t *mddev = v; 6375 6376 
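/* (void*)1 and (void*)2 are the header/tail markers handed out by md_seq_start(), not real mddevs, so only drop a reference for a genuine array entry */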
if (mddev && v != (void*)1 && v != (void*)2) 6377 mddev_put(mddev); 6378 } 6379 6380 struct mdstat_info { 6381 int event; 6382 }; 6383 6384 static int md_seq_show(struct seq_file *seq, void *v) 6385 { 6386 mddev_t *mddev = v; 6387 sector_t sectors; 6388 mdk_rdev_t *rdev; 6389 struct mdstat_info *mi = seq->private; 6390 struct bitmap *bitmap; 6391 6392 if (v == (void*)1) { 6393 struct mdk_personality *pers; 6394 seq_printf(seq, "Personalities : "); 6395 spin_lock(&pers_lock); 6396 list_for_each_entry(pers, &pers_list, list) 6397 seq_printf(seq, "[%s] ", pers->name); 6398 6399 spin_unlock(&pers_lock); 6400 seq_printf(seq, "\n"); 6401 mi->event = atomic_read(&md_event_count); 6402 return 0; 6403 } 6404 if (v == (void*)2) { 6405 status_unused(seq); 6406 return 0; 6407 } 6408 6409 if (mddev_lock(mddev) < 0) 6410 return -EINTR; 6411 6412 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 6413 seq_printf(seq, "%s : %sactive", mdname(mddev), 6414 mddev->pers ? "" : "in"); 6415 if (mddev->pers) { 6416 if (mddev->ro==1) 6417 seq_printf(seq, " (read-only)"); 6418 if (mddev->ro==2) 6419 seq_printf(seq, " (auto-read-only)"); 6420 seq_printf(seq, " %s", mddev->pers->name); 6421 } 6422 6423 sectors = 0; 6424 list_for_each_entry(rdev, &mddev->disks, same_set) { 6425 char b[BDEVNAME_SIZE]; 6426 seq_printf(seq, " %s[%d]", 6427 bdevname(rdev->bdev,b), rdev->desc_nr); 6428 if (test_bit(WriteMostly, &rdev->flags)) 6429 seq_printf(seq, "(W)"); 6430 if (test_bit(Faulty, &rdev->flags)) { 6431 seq_printf(seq, "(F)"); 6432 continue; 6433 } else if (rdev->raid_disk < 0) 6434 seq_printf(seq, "(S)"); /* spare */ 6435 sectors += rdev->sectors; 6436 } 6437 6438 if (!list_empty(&mddev->disks)) { 6439 if (mddev->pers) 6440 seq_printf(seq, "\n %llu blocks", 6441 (unsigned long long) 6442 mddev->array_sectors / 2); 6443 else 6444 seq_printf(seq, "\n %llu blocks", 6445 (unsigned long long)sectors / 2); 6446 } 6447 if (mddev->persistent) { 6448 if (mddev->major_version != 0 || 6449 mddev->minor_version != 90) { 6450 seq_printf(seq," super %d.%d", 6451 mddev->major_version, 6452 mddev->minor_version); 6453 } 6454 } else if (mddev->external) 6455 seq_printf(seq, " super external:%s", 6456 mddev->metadata_type); 6457 else 6458 seq_printf(seq, " super non-persistent"); 6459 6460 if (mddev->pers) { 6461 mddev->pers->status(seq, mddev); 6462 seq_printf(seq, "\n "); 6463 if (mddev->pers->sync_request) { 6464 if (mddev->curr_resync > 2) { 6465 status_resync(seq, mddev); 6466 seq_printf(seq, "\n "); 6467 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 6468 seq_printf(seq, "\tresync=DELAYED\n "); 6469 else if (mddev->recovery_cp < MaxSector) 6470 seq_printf(seq, "\tresync=PENDING\n "); 6471 } 6472 } else 6473 seq_printf(seq, "\n "); 6474 6475 if ((bitmap = mddev->bitmap)) { 6476 unsigned long chunk_kb; 6477 unsigned long flags; 6478 spin_lock_irqsave(&bitmap->lock, flags); 6479 chunk_kb = mddev->bitmap_info.chunksize >> 10; 6480 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 6481 "%lu%s chunk", 6482 bitmap->pages - bitmap->missing_pages, 6483 bitmap->pages, 6484 (bitmap->pages - bitmap->missing_pages) 6485 << (PAGE_SHIFT - 10), 6486 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, 6487 chunk_kb ? 
"KB" : "B"); 6488 if (bitmap->file) { 6489 seq_printf(seq, ", file: "); 6490 seq_path(seq, &bitmap->file->f_path, " \t\n"); 6491 } 6492 6493 seq_printf(seq, "\n"); 6494 spin_unlock_irqrestore(&bitmap->lock, flags); 6495 } 6496 6497 seq_printf(seq, "\n"); 6498 } 6499 mddev_unlock(mddev); 6500 6501 return 0; 6502 } 6503 6504 static const struct seq_operations md_seq_ops = { 6505 .start = md_seq_start, 6506 .next = md_seq_next, 6507 .stop = md_seq_stop, 6508 .show = md_seq_show, 6509 }; 6510 6511 static int md_seq_open(struct inode *inode, struct file *file) 6512 { 6513 int error; 6514 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 6515 if (mi == NULL) 6516 return -ENOMEM; 6517 6518 error = seq_open(file, &md_seq_ops); 6519 if (error) 6520 kfree(mi); 6521 else { 6522 struct seq_file *p = file->private_data; 6523 p->private = mi; 6524 mi->event = atomic_read(&md_event_count); 6525 } 6526 return error; 6527 } 6528 6529 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6530 { 6531 struct seq_file *m = filp->private_data; 6532 struct mdstat_info *mi = m->private; 6533 int mask; 6534 6535 poll_wait(filp, &md_event_waiters, wait); 6536 6537 /* always allow read */ 6538 mask = POLLIN | POLLRDNORM; 6539 6540 if (mi->event != atomic_read(&md_event_count)) 6541 mask |= POLLERR | POLLPRI; 6542 return mask; 6543 } 6544 6545 static const struct file_operations md_seq_fops = { 6546 .owner = THIS_MODULE, 6547 .open = md_seq_open, 6548 .read = seq_read, 6549 .llseek = seq_lseek, 6550 .release = seq_release_private, 6551 .poll = mdstat_poll, 6552 }; 6553 6554 int register_md_personality(struct mdk_personality *p) 6555 { 6556 spin_lock(&pers_lock); 6557 list_add_tail(&p->list, &pers_list); 6558 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6559 spin_unlock(&pers_lock); 6560 return 0; 6561 } 6562 6563 int unregister_md_personality(struct mdk_personality *p) 6564 { 6565 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6566 spin_lock(&pers_lock); 6567 list_del_init(&p->list); 6568 spin_unlock(&pers_lock); 6569 return 0; 6570 } 6571 6572 static int is_mddev_idle(mddev_t *mddev, int init) 6573 { 6574 mdk_rdev_t * rdev; 6575 int idle; 6576 int curr_events; 6577 6578 idle = 1; 6579 rcu_read_lock(); 6580 rdev_for_each_rcu(rdev, mddev) { 6581 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6582 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6583 (int)part_stat_read(&disk->part0, sectors[1]) - 6584 atomic_read(&disk->sync_io); 6585 /* sync IO will cause sync_io to increase before the disk_stats 6586 * as sync_io is counted when a request starts, and 6587 * disk_stats is counted when it completes. 6588 * So resync activity will cause curr_events to be smaller than 6589 * when there was no such activity. 6590 * non-sync IO will cause disk_stat to increase without 6591 * increasing sync_io so curr_events will (eventually) 6592 * be larger than it was before. Once it becomes 6593 * substantially larger, the test below will cause 6594 * the array to appear non-idle, and resync will slow 6595 * down. 6596 * If there is a lot of outstanding resync activity when 6597 * we set last_event to curr_events, then all that activity 6598 * completing might cause the array to appear non-idle 6599 * and resync will be slowed down even though there might 6600 * not have been non-resync activity. This will only 6601 * happen once though. 
'last_events' will soon reflect 6602 * the state where there are few or no outstanding 6603 * resync requests, and further resync activity will 6604 * always make curr_events less than last_events. 6605 * 6606 */ 6607 if (init || curr_events - rdev->last_events > 64) { 6608 rdev->last_events = curr_events; 6609 idle = 0; 6610 } 6611 } 6612 rcu_read_unlock(); 6613 return idle; 6614 } 6615 6616 void md_done_sync(mddev_t *mddev, int blocks, int ok) 6617 { 6618 /* another "blocks" (512-byte) blocks have been synced */ 6619 atomic_sub(blocks, &mddev->recovery_active); 6620 wake_up(&mddev->recovery_wait); 6621 if (!ok) { 6622 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6623 md_wakeup_thread(mddev->thread); 6624 /* stop recovery, signal do_sync .... */ 6625 } 6626 } 6627 6628 6629 /* md_write_start(mddev, bi) 6630 * If we need to update some array metadata (e.g. 'active' flag 6631 * in superblock) before writing, schedule a superblock update 6632 * and wait for it to complete. 6633 */ 6634 void md_write_start(mddev_t *mddev, struct bio *bi) 6635 { 6636 int did_change = 0; 6637 if (bio_data_dir(bi) != WRITE) 6638 return; 6639 6640 BUG_ON(mddev->ro == 1); 6641 if (mddev->ro == 2) { 6642 /* need to switch to read/write */ 6643 mddev->ro = 0; 6644 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6645 md_wakeup_thread(mddev->thread); 6646 md_wakeup_thread(mddev->sync_thread); 6647 did_change = 1; 6648 } 6649 atomic_inc(&mddev->writes_pending); 6650 if (mddev->safemode == 1) 6651 mddev->safemode = 0; 6652 if (mddev->in_sync) { 6653 spin_lock_irq(&mddev->write_lock); 6654 if (mddev->in_sync) { 6655 mddev->in_sync = 0; 6656 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6657 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6658 md_wakeup_thread(mddev->thread); 6659 did_change = 1; 6660 } 6661 spin_unlock_irq(&mddev->write_lock); 6662 } 6663 if (did_change) 6664 sysfs_notify_dirent_safe(mddev->sysfs_state); 6665 wait_event(mddev->sb_wait, 6666 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6667 } 6668 6669 void md_write_end(mddev_t *mddev) 6670 { 6671 if (atomic_dec_and_test(&mddev->writes_pending)) { 6672 if (mddev->safemode == 2) 6673 md_wakeup_thread(mddev->thread); 6674 else if (mddev->safemode_delay) 6675 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 6676 } 6677 } 6678 6679 /* md_allow_write(mddev) 6680 * Calling this ensures that the array is marked 'active' so that writes 6681 * may proceed without blocking. It is important to call this before 6682 * attempting a GFP_KERNEL allocation while holding the mddev lock. 6683 * Must be called with mddev_lock held. 6684 * 6685 * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock 6686 * is dropped, so return -EAGAIN after notifying userspace.
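 *
 * A typical caller therefore looks roughly like this (illustrative
 * sketch only; 'len' and 'new' are hypothetical):
 *
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;
 *	new = kmalloc(len, GFP_KERNEL);
 *
 * so the GFP_KERNEL allocation only happens once the array has been
 * marked active, or the caller backs off on -EAGAIN.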
6687 */ 6688 int md_allow_write(mddev_t *mddev) 6689 { 6690 if (!mddev->pers) 6691 return 0; 6692 if (mddev->ro) 6693 return 0; 6694 if (!mddev->pers->sync_request) 6695 return 0; 6696 6697 spin_lock_irq(&mddev->write_lock); 6698 if (mddev->in_sync) { 6699 mddev->in_sync = 0; 6700 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6701 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6702 if (mddev->safemode_delay && 6703 mddev->safemode == 0) 6704 mddev->safemode = 1; 6705 spin_unlock_irq(&mddev->write_lock); 6706 md_update_sb(mddev, 0); 6707 sysfs_notify_dirent_safe(mddev->sysfs_state); 6708 } else 6709 spin_unlock_irq(&mddev->write_lock); 6710 6711 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 6712 return -EAGAIN; 6713 else 6714 return 0; 6715 } 6716 EXPORT_SYMBOL_GPL(md_allow_write); 6717 6718 #define SYNC_MARKS 10 6719 #define SYNC_MARK_STEP (3*HZ) 6720 void md_do_sync(mddev_t *mddev) 6721 { 6722 mddev_t *mddev2; 6723 unsigned int currspeed = 0, 6724 window; 6725 sector_t max_sectors,j, io_sectors; 6726 unsigned long mark[SYNC_MARKS]; 6727 sector_t mark_cnt[SYNC_MARKS]; 6728 int last_mark,m; 6729 struct list_head *tmp; 6730 sector_t last_check; 6731 int skipped = 0; 6732 mdk_rdev_t *rdev; 6733 char *desc; 6734 6735 /* just in case the thread restarts... */ 6736 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 6737 return; 6738 if (mddev->ro) /* never try to sync a read-only array */ 6739 return; 6740 6741 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6742 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 6743 desc = "data-check"; 6744 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6745 desc = "requested-resync"; 6746 else 6747 desc = "resync"; 6748 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6749 desc = "reshape"; 6750 else 6751 desc = "recovery"; 6752 6753 /* we overload curr_resync somewhat here. 6754 * 0 == not engaged in resync at all 6755 * 2 == checking that there is no conflict with another sync 6756 * 1 == like 2, but have yielded to allow conflicting resync to 6757 * commence 6758 * other == active in resync - this many blocks 6759 * 6760 * Before starting a resync we must have set curr_resync to 6761 * 2, and then checked that every "conflicting" array has curr_resync 6762 * less than ours. When we find one that is the same or higher 6763 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 6764 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 6765 * This will mean we have to start checking from the beginning again.
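 *
 * Concretely: if two arrays that share a physical device both reach
 * curr_resync == 2, the mddev with the lower address yields (drops
 * curr_resync to 1) and waits on resync_wait, the other proceeds,
 * and the yielder re-runs its conflict check from try_again: once
 * it is woken.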
6766 * 6767 */ 6768 6769 do { 6770 mddev->curr_resync = 2; 6771 6772 try_again: 6773 if (kthread_should_stop()) 6774 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6775 6776 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6777 goto skip; 6778 for_each_mddev(mddev2, tmp) { 6779 if (mddev2 == mddev) 6780 continue; 6781 if (!mddev->parallel_resync 6782 && mddev2->curr_resync 6783 && match_mddev_units(mddev, mddev2)) { 6784 DEFINE_WAIT(wq); 6785 if (mddev < mddev2 && mddev->curr_resync == 2) { 6786 /* arbitrarily yield */ 6787 mddev->curr_resync = 1; 6788 wake_up(&resync_wait); 6789 } 6790 if (mddev > mddev2 && mddev->curr_resync == 1) 6791 /* no need to wait here, we can wait the next 6792 * time 'round when curr_resync == 2 6793 */ 6794 continue; 6795 /* We need to wait 'interruptible' so as not to 6796 * contribute to the load average, and not to 6797 * be caught by 'softlockup' 6798 */ 6799 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 6800 if (!kthread_should_stop() && 6801 mddev2->curr_resync >= mddev->curr_resync) { 6802 printk(KERN_INFO "md: delaying %s of %s" 6803 " until %s has finished (they" 6804 " share one or more physical units)\n", 6805 desc, mdname(mddev), mdname(mddev2)); 6806 mddev_put(mddev2); 6807 if (signal_pending(current)) 6808 flush_signals(current); 6809 schedule(); 6810 finish_wait(&resync_wait, &wq); 6811 goto try_again; 6812 } 6813 finish_wait(&resync_wait, &wq); 6814 } 6815 } 6816 } while (mddev->curr_resync < 2); 6817 6818 j = 0; 6819 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6820 /* resync follows the size requested by the personality, 6821 * which defaults to physical size, but can be virtual size 6822 */ 6823 max_sectors = mddev->resync_max_sectors; 6824 mddev->resync_mismatches = 0; 6825 /* we don't use the checkpoint if there's a bitmap */ 6826 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6827 j = mddev->resync_min; 6828 else if (!mddev->bitmap) 6829 j = mddev->recovery_cp; 6830 6831 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6832 max_sectors = mddev->dev_sectors; 6833 else { 6834 /* recovery follows the physical size of devices */ 6835 max_sectors = mddev->dev_sectors; 6836 j = MaxSector; 6837 rcu_read_lock(); 6838 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 6839 if (rdev->raid_disk >= 0 && 6840 !test_bit(Faulty, &rdev->flags) && 6841 !test_bit(In_sync, &rdev->flags) && 6842 rdev->recovery_offset < j) 6843 j = rdev->recovery_offset; 6844 rcu_read_unlock(); 6845 } 6846 6847 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 6848 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 6849 " %d KB/sec/disk.\n", speed_min(mddev)); 6850 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 6851 "(but not more than %d KB/sec) for %s.\n", 6852 speed_max(mddev), desc); 6853 6854 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 6855 6856 io_sectors = 0; 6857 for (m = 0; m < SYNC_MARKS; m++) { 6858 mark[m] = jiffies; 6859 mark_cnt[m] = io_sectors; 6860 } 6861 last_mark = 0; 6862 mddev->resync_mark = mark[last_mark]; 6863 mddev->resync_mark_cnt = mark_cnt[last_mark]; 6864 6865 /* 6866 * Tune reconstruction: 6867 */ 6868 window = 32*(PAGE_SIZE/512); 6869 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 6870 window/2,(unsigned long long) max_sectors/2); 6871 6872 atomic_set(&mddev->recovery_active, 0); 6873 last_check = 0; 6874 6875 if (j>2) { 6876 printk(KERN_INFO 6877 "md: resuming %s of %s from checkpoint.\n", 6878 desc, mdname(mddev)); 6879 
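/* 'j' is the first sector we still need to look at; everything below it is already in sync */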
mddev->curr_resync = j; 6880 } 6881 mddev->curr_resync_completed = j; 6882 6883 while (j < max_sectors) { 6884 sector_t sectors; 6885 6886 skipped = 0; 6887 6888 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 6889 ((mddev->curr_resync > mddev->curr_resync_completed && 6890 (mddev->curr_resync - mddev->curr_resync_completed) 6891 > (max_sectors >> 4)) || 6892 (j - mddev->curr_resync_completed)*2 6893 >= mddev->resync_max - mddev->curr_resync_completed 6894 )) { 6895 /* time to update curr_resync_completed */ 6896 wait_event(mddev->recovery_wait, 6897 atomic_read(&mddev->recovery_active) == 0); 6898 mddev->curr_resync_completed = j; 6899 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6900 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6901 } 6902 6903 while (j >= mddev->resync_max && !kthread_should_stop()) { 6904 /* As this condition is controlled by user-space, 6905 * we can block indefinitely, so use '_interruptible' 6906 * to avoid triggering warnings. 6907 */ 6908 flush_signals(current); /* just in case */ 6909 wait_event_interruptible(mddev->recovery_wait, 6910 mddev->resync_max > j 6911 || kthread_should_stop()); 6912 } 6913 6914 if (kthread_should_stop()) 6915 goto interrupted; 6916 6917 sectors = mddev->pers->sync_request(mddev, j, &skipped, 6918 currspeed < speed_min(mddev)); 6919 if (sectors == 0) { 6920 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6921 goto out; 6922 } 6923 6924 if (!skipped) { /* actual IO requested */ 6925 io_sectors += sectors; 6926 atomic_add(sectors, &mddev->recovery_active); 6927 } 6928 6929 j += sectors; 6930 if (j>1) mddev->curr_resync = j; 6931 mddev->curr_mark_cnt = io_sectors; 6932 if (last_check == 0) 6933 /* this is the earliest that the rebuild will be 6934 * visible in /proc/mdstat 6935 */ 6936 md_new_event(mddev); 6937 6938 if (last_check + window > io_sectors || j == max_sectors) 6939 continue; 6940 6941 last_check = io_sectors; 6942 6943 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6944 break; 6945 6946 repeat: 6947 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 6948 /* step marks */ 6949 int next = (last_mark+1) % SYNC_MARKS; 6950 6951 mddev->resync_mark = mark[next]; 6952 mddev->resync_mark_cnt = mark_cnt[next]; 6953 mark[next] = jiffies; 6954 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 6955 last_mark = next; 6956 } 6957 6958 6959 if (kthread_should_stop()) 6960 goto interrupted; 6961 6962 6963 /* 6964 * this loop exits only when we are slower than 6965 * the 'hard' speed limit, or the system was IO-idle for 6966 * a jiffy. 6967 * the system might be non-idle CPU-wise, but we only care 6968 * about not overloading the IO subsystem.
(things like an 6969 * e2fsck being done on the RAID array should execute fast) 6970 */ 6971 cond_resched(); 6972 6973 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 6974 /((jiffies-mddev->resync_mark)/HZ +1) +1; 6975 6976 if (currspeed > speed_min(mddev)) { 6977 if ((currspeed > speed_max(mddev)) || 6978 !is_mddev_idle(mddev, 0)) { 6979 msleep(500); 6980 goto repeat; 6981 } 6982 } 6983 } 6984 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); 6985 /* 6986 * this also signals 'finished resyncing' to md_stop 6987 */ 6988 out: 6989 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 6990 6991 /* tell personality that we are finished */ 6992 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 6993 6994 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 6995 mddev->curr_resync > 2) { 6996 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6997 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 6998 if (mddev->curr_resync >= mddev->recovery_cp) { 6999 printk(KERN_INFO 7000 "md: checkpointing %s of %s.\n", 7001 desc, mdname(mddev)); 7002 mddev->recovery_cp = mddev->curr_resync; 7003 } 7004 } else 7005 mddev->recovery_cp = MaxSector; 7006 } else { 7007 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7008 mddev->curr_resync = MaxSector; 7009 rcu_read_lock(); 7010 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7011 if (rdev->raid_disk >= 0 && 7012 mddev->delta_disks >= 0 && 7013 !test_bit(Faulty, &rdev->flags) && 7014 !test_bit(In_sync, &rdev->flags) && 7015 rdev->recovery_offset < mddev->curr_resync) 7016 rdev->recovery_offset = mddev->curr_resync; 7017 rcu_read_unlock(); 7018 } 7019 } 7020 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7021 7022 skip: 7023 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7024 /* We completed so min/max setting can be forgotten if used. */ 7025 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7026 mddev->resync_min = 0; 7027 mddev->resync_max = MaxSector; 7028 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7029 mddev->resync_min = mddev->curr_resync_completed; 7030 mddev->curr_resync = 0; 7031 wake_up(&resync_wait); 7032 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7033 md_wakeup_thread(mddev->thread); 7034 return; 7035 7036 interrupted: 7037 /* 7038 * got a signal, exit. 7039 */ 7040 printk(KERN_INFO 7041 "md: md_do_sync() got signal ... exiting\n"); 7042 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7043 goto out; 7044 7045 } 7046 EXPORT_SYMBOL_GPL(md_do_sync); 7047 7048 7049 static int remove_and_add_spares(mddev_t *mddev) 7050 { 7051 mdk_rdev_t *rdev; 7052 int spares = 0; 7053 7054 mddev->curr_resync_completed = 0; 7055 7056 list_for_each_entry(rdev, &mddev->disks, same_set) 7057 if (rdev->raid_disk >= 0 && 7058 !test_bit(Blocked, &rdev->flags) && 7059 (test_bit(Faulty, &rdev->flags) || 7060 ! 
test_bit(In_sync, &rdev->flags)) && 7061 atomic_read(&rdev->nr_pending)==0) { 7062 if (mddev->pers->hot_remove_disk( 7063 mddev, rdev->raid_disk)==0) { 7064 char nm[20]; 7065 sprintf(nm,"rd%d", rdev->raid_disk); 7066 sysfs_remove_link(&mddev->kobj, nm); 7067 rdev->raid_disk = -1; 7068 } 7069 } 7070 7071 if (mddev->degraded && !mddev->recovery_disabled) { 7072 list_for_each_entry(rdev, &mddev->disks, same_set) { 7073 if (rdev->raid_disk >= 0 && 7074 !test_bit(In_sync, &rdev->flags) && 7075 !test_bit(Blocked, &rdev->flags)) 7076 spares++; 7077 if (rdev->raid_disk < 0 7078 && !test_bit(Faulty, &rdev->flags)) { 7079 rdev->recovery_offset = 0; 7080 if (mddev->pers-> 7081 hot_add_disk(mddev, rdev) == 0) { 7082 char nm[20]; 7083 sprintf(nm, "rd%d", rdev->raid_disk); 7084 if (sysfs_create_link(&mddev->kobj, 7085 &rdev->kobj, nm)) 7086 /* failure here is OK */; 7087 spares++; 7088 md_new_event(mddev); 7089 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7090 } else 7091 break; 7092 } 7093 } 7094 } 7095 return spares; 7096 } 7097 7098 static void reap_sync_thread(mddev_t *mddev) 7099 { 7100 mdk_rdev_t *rdev; 7101 7102 /* resync has finished, collect result */ 7103 md_unregister_thread(mddev->sync_thread); 7104 mddev->sync_thread = NULL; 7105 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7106 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7107 /* success...*/ 7108 /* activate any spares */ 7109 if (mddev->pers->spare_active(mddev)) 7110 sysfs_notify(&mddev->kobj, NULL, 7111 "degraded"); 7112 } 7113 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7114 mddev->pers->finish_reshape) 7115 mddev->pers->finish_reshape(mddev); 7116 md_update_sb(mddev, 1); 7117 7118 /* if array is no longer degraded, then any saved_raid_disk 7119 * information must be scrapped 7120 */ 7121 if (!mddev->degraded) 7122 list_for_each_entry(rdev, &mddev->disks, same_set) 7123 rdev->saved_raid_disk = -1; 7124 7125 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7126 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7127 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7128 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7129 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7130 /* flag recovery needed just to double check */ 7131 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7132 sysfs_notify_dirent_safe(mddev->sysfs_action); 7133 md_new_event(mddev); 7134 } 7135 7136 /* 7137 * This routine is regularly called by all per-raid-array threads to 7138 * deal with generic issues like resync and super-block update. 7139 * Raid personalities that don't have a thread (linear/raid0) do not 7140 * need this as they never do any recovery or update the superblock. 7141 * 7142 * It does not do any resync itself, but rather "forks" off other threads 7143 * to do that as needed. 7144 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 7145 * "->recovery" and create a thread at ->sync_thread. 7146 * When the thread finishes it sets MD_RECOVERY_DONE 7147 * and wakes up this thread which will reap the thread and finish up. 7148 * This thread also removes any faulty devices (with nr_pending == 0). 7149 * 7150 * The overall approach is: 7151 * 1/ If the superblock needs updating, update it. 7152 * 2/ If a recovery thread is running, don't do anything else. 7153 * 3/ If recovery has finished, clean up, possibly marking spares active. 7154 * 4/ If there are any faulty devices, remove them.
7155 * 5/ If array is degraded, try to add spare devices 7156 * 6/ If array has spares or is not in-sync, start a resync thread. 7157 */ 7158 void md_check_recovery(mddev_t *mddev) 7159 { 7160 if (mddev->bitmap) 7161 bitmap_daemon_work(mddev); 7162 7163 if (mddev->ro) 7164 return; 7165 7166 if (signal_pending(current)) { 7167 if (mddev->pers->sync_request && !mddev->external) { 7168 printk(KERN_INFO "md: %s in immediate safe mode\n", 7169 mdname(mddev)); 7170 mddev->safemode = 2; 7171 } 7172 flush_signals(current); 7173 } 7174 7175 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7176 return; 7177 if ( ! ( 7178 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || 7179 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7180 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7181 (mddev->external == 0 && mddev->safemode == 1) || 7182 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) 7183 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 7184 )) 7185 return; 7186 7187 if (mddev_trylock(mddev)) { 7188 int spares = 0; 7189 7190 if (mddev->ro) { 7191 /* Only thing we do on a ro array is remove 7192 * failed devices. 7193 */ 7194 mdk_rdev_t *rdev; 7195 list_for_each_entry(rdev, &mddev->disks, same_set) 7196 if (rdev->raid_disk >= 0 && 7197 !test_bit(Blocked, &rdev->flags) && 7198 test_bit(Faulty, &rdev->flags) && 7199 atomic_read(&rdev->nr_pending)==0) { 7200 if (mddev->pers->hot_remove_disk( 7201 mddev, rdev->raid_disk)==0) { 7202 char nm[20]; 7203 sprintf(nm,"rd%d", rdev->raid_disk); 7204 sysfs_remove_link(&mddev->kobj, nm); 7205 rdev->raid_disk = -1; 7206 } 7207 } 7208 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7209 goto unlock; 7210 } 7211 7212 if (!mddev->external) { 7213 int did_change = 0; 7214 spin_lock_irq(&mddev->write_lock); 7215 if (mddev->safemode && 7216 !atomic_read(&mddev->writes_pending) && 7217 !mddev->in_sync && 7218 mddev->recovery_cp == MaxSector) { 7219 mddev->in_sync = 1; 7220 did_change = 1; 7221 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7222 } 7223 if (mddev->safemode == 1) 7224 mddev->safemode = 0; 7225 spin_unlock_irq(&mddev->write_lock); 7226 if (did_change) 7227 sysfs_notify_dirent_safe(mddev->sysfs_state); 7228 } 7229 7230 if (mddev->flags) 7231 md_update_sb(mddev, 0); 7232 7233 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 7234 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 7235 /* resync/recovery still happening */ 7236 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7237 goto unlock; 7238 } 7239 if (mddev->sync_thread) { 7240 reap_sync_thread(mddev); 7241 goto unlock; 7242 } 7243 /* Set RUNNING before clearing NEEDED to avoid 7244 * any transients in the value of "sync_action". 7245 */ 7246 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7247 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7248 /* Clear some bits that don't mean anything, but 7249 * might be left set 7250 */ 7251 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 7252 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7253 7254 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 7255 goto unlock; 7256 /* no recovery is running. 7257 * remove any failed drives, then 7258 * add spares if possible. 7259 * Spares are also removed and re-added, to allow 7260 * the personality to fail the re-add.
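 * (remove_and_add_spares(), called just below, does this removing
 * and re-adding, and returns the number of spares available for
 * recovery.)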
7261 */ 7262 7263 if (mddev->reshape_position != MaxSector) { 7264 if (mddev->pers->check_reshape == NULL || 7265 mddev->pers->check_reshape(mddev) != 0) 7266 /* Cannot proceed */ 7267 goto unlock; 7268 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7269 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7270 } else if ((spares = remove_and_add_spares(mddev))) { 7271 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7272 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7273 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7274 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7275 } else if (mddev->recovery_cp < MaxSector) { 7276 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7277 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7278 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 7279 /* nothing to be done ... */ 7280 goto unlock; 7281 7282 if (mddev->pers->sync_request) { 7283 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 7284 /* We are adding a device or devices to an array 7285 * which has the bitmap stored on all devices. 7286 * So make sure all bitmap pages get written 7287 */ 7288 bitmap_write_all(mddev->bitmap); 7289 } 7290 mddev->sync_thread = md_register_thread(md_do_sync, 7291 mddev, 7292 "resync"); 7293 if (!mddev->sync_thread) { 7294 printk(KERN_ERR "%s: could not start resync" 7295 " thread...\n", 7296 mdname(mddev)); 7297 /* leave the spares where they are, it shouldn't hurt */ 7298 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7299 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7300 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7301 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7302 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7303 } else 7304 md_wakeup_thread(mddev->sync_thread); 7305 sysfs_notify_dirent_safe(mddev->sysfs_action); 7306 md_new_event(mddev); 7307 } 7308 unlock: 7309 if (!mddev->sync_thread) { 7310 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7311 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 7312 &mddev->recovery)) 7313 if (mddev->sysfs_action) 7314 sysfs_notify_dirent_safe(mddev->sysfs_action); 7315 } 7316 mddev_unlock(mddev); 7317 } 7318 } 7319 7320 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 7321 { 7322 sysfs_notify_dirent_safe(rdev->sysfs_state); 7323 wait_event_timeout(rdev->blocked_wait, 7324 !test_bit(Blocked, &rdev->flags), 7325 msecs_to_jiffies(5000)); 7326 rdev_dec_pending(rdev, mddev); 7327 } 7328 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 7329 7330 static int md_notify_reboot(struct notifier_block *this, 7331 unsigned long code, void *x) 7332 { 7333 struct list_head *tmp; 7334 mddev_t *mddev; 7335 7336 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { 7337 7338 printk(KERN_INFO "md: stopping all md devices.\n"); 7339 7340 for_each_mddev(mddev, tmp) 7341 if (mddev_trylock(mddev)) { 7342 /* Force a switch to readonly even if the array 7343 * appears to still be in use. Hence 7344 * the '100'. 7345 */ 7346 md_set_readonly(mddev, 100); 7347 mddev_unlock(mddev); 7348 } 7349 /* 7350 * certain more exotic SCSI devices are known to be 7351 * volatile wrt too-early system reboots. While the 7352 * right place to handle this issue is the given 7353 * driver, we do want to have a safe RAID driver ...
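 * so we pause for a second below to give such devices a chance to
 * settle before the reboot continues.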
7354 */ 7355 mdelay(1000*1); 7356 } 7357 return NOTIFY_DONE; 7358 } 7359 7360 static struct notifier_block md_notifier = { 7361 .notifier_call = md_notify_reboot, 7362 .next = NULL, 7363 .priority = INT_MAX, /* before any real devices */ 7364 }; 7365 7366 static void md_geninit(void) 7367 { 7368 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 7369 7370 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); 7371 } 7372 7373 static int __init md_init(void) 7374 { 7375 int ret = -ENOMEM; 7376 7377 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); 7378 if (!md_wq) 7379 goto err_wq; 7380 7381 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 7382 if (!md_misc_wq) 7383 goto err_misc_wq; 7384 7385 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0) 7386 goto err_md; 7387 7388 if ((ret = register_blkdev(0, "mdp")) < 0) 7389 goto err_mdp; 7390 mdp_major = ret; 7391 7392 blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE, 7393 md_probe, NULL, NULL); 7394 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, 7395 md_probe, NULL, NULL); 7396 7397 register_reboot_notifier(&md_notifier); 7398 raid_table_header = register_sysctl_table(raid_root_table); 7399 7400 md_geninit(); 7401 return 0; 7402 7403 err_mdp: 7404 unregister_blkdev(MD_MAJOR, "md"); 7405 err_md: 7406 destroy_workqueue(md_misc_wq); 7407 err_misc_wq: 7408 destroy_workqueue(md_wq); 7409 err_wq: 7410 return ret; 7411 } 7412 7413 #ifndef MODULE 7414 7415 /* 7416 * Searches all registered partitions for autorun RAID arrays 7417 * at boot time. 7418 */ 7419 7420 static LIST_HEAD(all_detected_devices); 7421 struct detected_devices_node { 7422 struct list_head list; 7423 dev_t dev; 7424 }; 7425 7426 void md_autodetect_dev(dev_t dev) 7427 { 7428 struct detected_devices_node *node_detected_dev; 7429 7430 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 7431 if (node_detected_dev) { 7432 node_detected_dev->dev = dev; 7433 list_add_tail(&node_detected_dev->list, &all_detected_devices); 7434 } else { 7435 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" 7436 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); 7437 } 7438 } 7439 7440 7441 static void autostart_arrays(int part) 7442 { 7443 mdk_rdev_t *rdev; 7444 struct detected_devices_node *node_detected_dev; 7445 dev_t dev; 7446 int i_scanned, i_passed; 7447 7448 i_scanned = 0; 7449 i_passed = 0; 7450 7451 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 7452 7453 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 7454 i_scanned++; 7455 node_detected_dev = list_entry(all_detected_devices.next, 7456 struct detected_devices_node, list); 7457 list_del(&node_detected_dev->list); 7458 dev = node_detected_dev->dev; 7459 kfree(node_detected_dev); 7460 rdev = md_import_device(dev,0, 90); 7461 if (IS_ERR(rdev)) 7462 continue; 7463 7464 if (test_bit(Faulty, &rdev->flags)) { 7465 MD_BUG(); 7466 continue; 7467 } 7468 set_bit(AutoDetected, &rdev->flags); 7469 list_add(&rdev->same_set, &pending_raid_disks); 7470 i_passed++; 7471 } 7472 7473 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", 7474 i_scanned, i_passed); 7475 7476 autorun_devices(part); 7477 } 7478 7479 #endif /* !MODULE */ 7480 7481 static __exit void md_exit(void) 7482 { 7483 mddev_t *mddev; 7484 struct list_head *tmp; 7485 7486 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); 7487 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 7488 7489 unregister_blkdev(MD_MAJOR,"md"); 7490 unregister_blkdev(mdp_major, "mdp"); 7491 
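/* undo the notifier, sysctl and procfs registrations made in md_init() and md_geninit() */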
unregister_reboot_notifier(&md_notifier); 7492 unregister_sysctl_table(raid_table_header); 7493 remove_proc_entry("mdstat", NULL); 7494 for_each_mddev(mddev, tmp) { 7495 export_array(mddev); 7496 mddev->hold_active = 0; 7497 } 7498 destroy_workqueue(md_misc_wq); 7499 destroy_workqueue(md_wq); 7500 } 7501 7502 subsys_initcall(md_init); 7503 module_exit(md_exit) 7504 7505 static int get_ro(char *buffer, struct kernel_param *kp) 7506 { 7507 return sprintf(buffer, "%d", start_readonly); 7508 } 7509 static int set_ro(const char *val, struct kernel_param *kp) 7510 { 7511 char *e; 7512 int num = simple_strtoul(val, &e, 10); 7513 if (*val && (*e == '\0' || *e == '\n')) { 7514 start_readonly = num; 7515 return 0; 7516 } 7517 return -EINVAL; 7518 } 7519 7520 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 7521 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 7522 7523 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 7524 7525 EXPORT_SYMBOL(register_md_personality); 7526 EXPORT_SYMBOL(unregister_md_personality); 7527 EXPORT_SYMBOL(md_error); 7528 EXPORT_SYMBOL(md_done_sync); 7529 EXPORT_SYMBOL(md_write_start); 7530 EXPORT_SYMBOL(md_write_end); 7531 EXPORT_SYMBOL(md_register_thread); 7532 EXPORT_SYMBOL(md_unregister_thread); 7533 EXPORT_SYMBOL(md_wakeup_thread); 7534 EXPORT_SYMBOL(md_check_recovery); 7535 MODULE_LICENSE("GPL"); 7536 MODULE_DESCRIPTION("MD RAID framework"); 7537 MODULE_ALIAS("md"); 7538 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 7539
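/*
 * Illustrative sketch (not part of this file) of how an out-of-tree
 * RAID personality would plug into the framework exported above; the
 * "example_*" names and the level value are hypothetical:
 *
 *	static struct mdk_personality example_personality = {
 *		.name		= "example",
 *		.level		= 42,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.stop		= example_stop,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 */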