/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */
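
/*
 * Illustrative only (not part of the driver): a sketch of how an
 * administrator might tune these limits from userspace, using the
 * procfs/sysfs paths named above and assuming an array called md0:
 *
 *	echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /sys/block/md0/md/sync_speed_max
 *
 * Values are in KB/sec per device; the per-array sysfs knobs, when
 * non-zero, override the global sysctl defaults (see speed_min() and
 * speed_max() below).
 */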

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_alloc_mddev and bio_clone_mddev
 * like bio_alloc/bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
	if (!b)
		return NULL;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    struct mddev *mddev)
{
	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
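
/*
 * Illustrative only: a minimal userspace sketch of the mechanism
 * described above.  poll(2) on /proc/mdstat reports an exceptional
 * condition whenever md_event_count is bumped; userspace must re-read
 * the file from the start to re-arm the poll.  Error handling is
 * omitted for brevity:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	for (;;) {
 *		read(fd, buf, sizeof(buf));	// consume current state
 *		lseek(fd, 0, SEEK_SET);
 *		poll(&pfd, 1, -1);		// blocks until next event
 *	}
 */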

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop still owns a reference
 * to the current mddev and must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
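
/*
 * Illustrative only: typical use of the iterator above.  The macro
 * manages the lock and refcounts itself, so the body runs unlocked
 * and may sleep; breaking out early leaves a reference held which the
 * caller must drop.  (want_this_one() is a made-up predicate for the
 * example.)
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		pr_info("%s\n", mdname(mddev));
 *		if (want_this_one(mddev))
 *			break;	// still holds a reference;
 *				// mddev_put() it when done
 *	}
 */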

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;
	int cpu;

	blk_queue_split(q, &bio, q->bio_split);

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_error = -EROFS;
		bio_endio(bio);
		return;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
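
/*
 * Illustrative only: the intended pairing of the two calls above when
 * a caller needs to change state the personality cannot change live
 * (a sketch, not a real callsite):
 *
 *	mddev_suspend(mddev);	// drain in-flight I/O, block new I/O
 *	// ... swap the configuration ...
 *	mddev_resume(mddev);	// let I/O flow again, kick recovery
 */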

int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_iter.bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		mddev->pers->make_request(mddev, bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->lock);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
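
/*
 * Illustrative only: the resulting sequence for one REQ_FLUSH bio,
 * stepping through the helpers above:
 *
 *	1. md_flush_request() parks the bio in mddev->flush_bio
 *	   (waiting for any earlier flush to finish) and queues
 *	   submit_flushes().
 *	2. submit_flushes() sends an empty WRITE_FLUSH bio to every
 *	   active, non-faulty rdev; each completion runs md_end_flush().
 *	3. The last completion queues md_submit_flush_data(), which
 *	   either ends an empty barrier bio or resubmits the payload
 *	   with REQ_FLUSH cleared, then clears flush_bio and wakes any
 *	   waiter in md_flush_request().
 */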

void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct mddev *mddev = cb->data;
	md_wakeup_thread(mddev->thread);
	kfree(cb);
}
EXPORT_SYMBOL(md_unplug);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

static void md_safemode_timeout(unsigned long data);

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	setup_timer(&mddev->safemode_timer, md_safemode_timeout,
		    (unsigned long) mddev);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}
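
/*
 * Illustrative only: the unit masking at the top of mddev_find().
 * With MdpMinorShift == 6, a partitionable (mdp) array owns a block
 * of 64 minors, so a dev_t pointing at one of its partitions rounds
 * down to the array's own unit, e.g. (assuming the usual mdp major):
 *
 *	unit &= ~((1 << MdpMinorShift) - 1);	// minor 67 & ~63 == 64,
 *						// i.e. partition 3 of the
 *						// second mdp array
 */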

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So hold sysfs_active set while the remove is happening,
		 * and anything else which might set ->to_remove or might
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
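
/*
 * Illustrative only: MD_NEW_SIZE_SECTORS() (from md_p.h) rounds the
 * device size down to a 64K boundary and steps back one 64K block, so
 * the 0.90 superblock occupies the last aligned 64K of the device.
 * E.g. for a 1000000-sector device:
 *
 *	1000000 & ~127 = 999936		// round down to 64K boundary
 *	999936 - 128   = 999808		// superblock offset, in sectors
 */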

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	kfree(rdev->badblocks.page);
	rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_error) {
		printk("md: super_written gets error=%d\n", bio->bi_error);
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	int ret;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	submit_bio_wait(rw, bio);

	ret = !bio->bi_error;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
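
/*
 * Illustrative only: md_csum_fold() reduces a 32-bit sum to 16 bits
 * with end-around carry, e.g.:
 *
 *	md_csum_fold(0x12345678)
 *	  = fold(0x5678 + 0x1234)	// 0x68ac, no carry left
 *	  = 0x68ac
 *
 * The second pass absorbs the carry when the first addition overflows
 * 16 bits (e.g. 0xffff0001 -> 0x0001 + 0xffff = 0x10000 -> 0x0001).
 */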

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}
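
/*
 * Illustrative only: the on-disk bad-block encoding decoded above
 * packs each entry into 64 bits as (sector << 10 | length), with a
 * 10-bit length field and the all-ones word as the end sentinel.
 * E.g. a 16-sector bad range starting at sector 2048 (bblog_shift
 * of 0) is stored as:
 *
 *	(2048 << 10) | 16 == 0x0000000000200010
 */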

static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
	}
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = p[i];
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				bbp[i] = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
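
/*
 * Illustrative only: for 1.1/1.2 metadata, super_1_allow_new_offset()
 * below keeps the data offset at least 36K past the superblock, i.e.
 * with the usual 1.2 layout (sb_start == 8) any new_offset below
 *
 *	8 + (32 + 4) * 2 == 80 sectors	(40K from device start)
 *
 * is rejected before the bitmap and bad-block ranges are considered.
 */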

static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = cpu_to_le64(rdev->sb_start);
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;

}

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
		.allow_new_offset   = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
		.allow_new_offset   = super_1_allow_new_offset,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}

static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	rdev_for_each(rdev, mddev) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev;
	struct blk_integrity *bi_mddev;

	if (!mddev->gendisk)
		return;

	bi_rdev = bdev_get_integrity(rdev->bdev);
	bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
1934 */ 1935 int md_integrity_register(struct mddev *mddev) 1936 { 1937 struct md_rdev *rdev, *reference = NULL; 1938 1939 if (list_empty(&mddev->disks)) 1940 return 0; /* nothing to do */ 1941 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 1942 return 0; /* shouldn't register, or already is */ 1943 rdev_for_each(rdev, mddev) { 1944 /* skip spares and non-functional disks */ 1945 if (test_bit(Faulty, &rdev->flags)) 1946 continue; 1947 if (rdev->raid_disk < 0) 1948 continue; 1949 if (!reference) { 1950 /* Use the first rdev as the reference */ 1951 reference = rdev; 1952 continue; 1953 } 1954 /* does this rdev's profile match the reference profile? */ 1955 if (blk_integrity_compare(reference->bdev->bd_disk, 1956 rdev->bdev->bd_disk) < 0) 1957 return -EINVAL; 1958 } 1959 if (!reference || !bdev_get_integrity(reference->bdev)) 1960 return 0; 1961 /* 1962 * All component devices are integrity capable and have matching 1963 * profiles, register the common profile for the md device. 1964 */ 1965 if (blk_integrity_register(mddev->gendisk, 1966 bdev_get_integrity(reference->bdev)) != 0) { 1967 printk(KERN_ERR "md: failed to register integrity for %s\n", 1968 mdname(mddev)); 1969 return -EINVAL; 1970 } 1971 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); 1972 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) { 1973 printk(KERN_ERR "md: failed to create integrity pool for %s\n", 1974 mdname(mddev)); 1975 return -EINVAL; 1976 } 1977 return 0; 1978 } 1979 EXPORT_SYMBOL(md_integrity_register); 1980 1981 /* Disable data integrity if non-capable/non-matching disk is being added */ 1982 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 1983 { 1984 struct blk_integrity *bi_rdev; 1985 struct blk_integrity *bi_mddev; 1986 1987 if (!mddev->gendisk) 1988 return; 1989 1990 bi_rdev = bdev_get_integrity(rdev->bdev); 1991 bi_mddev = blk_get_integrity(mddev->gendisk); 1992 1993 if (!bi_mddev) /* nothing to do */ 1994 return; 1995 if (rdev->raid_disk < 0) /* skip spares */ 1996 return; 1997 if (bi_rdev && blk_integrity_compare(mddev->gendisk, 1998 rdev->bdev->bd_disk) >= 0) 1999 return; 2000 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); 2001 blk_integrity_unregister(mddev->gendisk); 2002 } 2003 EXPORT_SYMBOL(md_integrity_add_rdev); 2004 2005 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2006 { 2007 char b[BDEVNAME_SIZE]; 2008 struct kobject *ko; 2009 int err; 2010 2011 /* prevent duplicates */ 2012 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2013 return -EEXIST; 2014 2015 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2016 if (rdev->sectors && (mddev->dev_sectors == 0 || 2017 rdev->sectors < mddev->dev_sectors)) { 2018 if (mddev->pers) { 2019 /* Cannot change size, so fail 2020 * If mddev->level <= 0, then we don't care 2021 * about aligning sizes (e.g. linear) 2022 */ 2023 if (mddev->level > 0) 2024 return -ENOSPC; 2025 } else 2026 mddev->dev_sectors = rdev->sectors; 2027 } 2028 2029 /* Verify rdev->desc_nr is unique. 
2030 * If it is -1, assign a free number, else 2031 * check the number is not in use 2032 */ 2033 rcu_read_lock(); 2034 if (rdev->desc_nr < 0) { 2035 int choice = 0; 2036 if (mddev->pers) 2037 choice = mddev->raid_disks; 2038 while (md_find_rdev_nr_rcu(mddev, choice)) 2039 choice++; 2040 rdev->desc_nr = choice; 2041 } else { 2042 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2043 rcu_read_unlock(); 2044 return -EBUSY; 2045 } 2046 } 2047 rcu_read_unlock(); 2048 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2049 printk(KERN_WARNING "md: %s: array is limited to %d devices\n", 2050 mdname(mddev), mddev->max_disks); 2051 return -EBUSY; 2052 } 2053 bdevname(rdev->bdev,b); 2054 strreplace(b, '/', '!'); 2055 2056 rdev->mddev = mddev; 2057 printk(KERN_INFO "md: bind<%s>\n", b); 2058 2059 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2060 goto fail; 2061 2062 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2063 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2064 /* failure here is OK */; 2065 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2066 2067 list_add_rcu(&rdev->same_set, &mddev->disks); 2068 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2069 2070 /* May as well allow recovery to be retried once */ 2071 mddev->recovery_disabled++; 2072 2073 return 0; 2074 2075 fail: 2076 printk(KERN_WARNING "md: failed to register dev-%s for %s\n", 2077 b, mdname(mddev)); 2078 return err; 2079 } 2080 2081 static void md_delayed_delete(struct work_struct *ws) 2082 { 2083 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2084 kobject_del(&rdev->kobj); 2085 kobject_put(&rdev->kobj); 2086 } 2087 2088 static void unbind_rdev_from_array(struct md_rdev *rdev) 2089 { 2090 char b[BDEVNAME_SIZE]; 2091 2092 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2093 list_del_rcu(&rdev->same_set); 2094 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2095 rdev->mddev = NULL; 2096 sysfs_remove_link(&rdev->kobj, "block"); 2097 sysfs_put(rdev->sysfs_state); 2098 rdev->sysfs_state = NULL; 2099 rdev->badblocks.count = 0; 2100 /* We need to delay this, otherwise we can deadlock when 2101 * writing 'remove' to "dev/state". We also need 2102 * to delay it due to rcu usage. 2103 */ 2104 synchronize_rcu(); 2105 INIT_WORK(&rdev->del_work, md_delayed_delete); 2106 kobject_get(&rdev->kobj); 2107 queue_work(md_misc_wq, &rdev->del_work); 2108 } 2109 2110 /* 2111 * prevent the device from being mounted, repartitioned or 2112 * otherwise reused by a RAID array (or any other kernel 2113 * subsystem), by bd_claiming the device. 2114 */ 2115 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2116 { 2117 int err = 0; 2118 struct block_device *bdev; 2119 char b[BDEVNAME_SIZE]; 2120 2121 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2122 shared ?
(struct md_rdev *)lock_rdev : rdev); 2123 if (IS_ERR(bdev)) { 2124 printk(KERN_ERR "md: could not open %s.\n", 2125 __bdevname(dev, b)); 2126 return PTR_ERR(bdev); 2127 } 2128 rdev->bdev = bdev; 2129 return err; 2130 } 2131 2132 static void unlock_rdev(struct md_rdev *rdev) 2133 { 2134 struct block_device *bdev = rdev->bdev; 2135 rdev->bdev = NULL; 2136 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2137 } 2138 2139 void md_autodetect_dev(dev_t dev); 2140 2141 static void export_rdev(struct md_rdev *rdev) 2142 { 2143 char b[BDEVNAME_SIZE]; 2144 2145 printk(KERN_INFO "md: export_rdev(%s)\n", 2146 bdevname(rdev->bdev,b)); 2147 md_rdev_clear(rdev); 2148 #ifndef MODULE 2149 if (test_bit(AutoDetected, &rdev->flags)) 2150 md_autodetect_dev(rdev->bdev->bd_dev); 2151 #endif 2152 unlock_rdev(rdev); 2153 kobject_put(&rdev->kobj); 2154 } 2155 2156 void md_kick_rdev_from_array(struct md_rdev *rdev) 2157 { 2158 unbind_rdev_from_array(rdev); 2159 export_rdev(rdev); 2160 } 2161 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2162 2163 static void export_array(struct mddev *mddev) 2164 { 2165 struct md_rdev *rdev; 2166 2167 while (!list_empty(&mddev->disks)) { 2168 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2169 same_set); 2170 md_kick_rdev_from_array(rdev); 2171 } 2172 mddev->raid_disks = 0; 2173 mddev->major_version = 0; 2174 } 2175 2176 static void sync_sbs(struct mddev *mddev, int nospares) 2177 { 2178 /* Update each superblock (in-memory image), but 2179 * if we are allowed to, skip spares which already 2180 * have the right event counter, or have one earlier 2181 * (which would mean they aren't being marked as dirty 2182 * with the rest of the array) 2183 */ 2184 struct md_rdev *rdev; 2185 rdev_for_each(rdev, mddev) { 2186 if (rdev->sb_events == mddev->events || 2187 (nospares && 2188 rdev->raid_disk < 0 && 2189 rdev->sb_events+1 == mddev->events)) { 2190 /* Don't update this superblock */ 2191 rdev->sb_loaded = 2; 2192 } else { 2193 sync_super(mddev, rdev); 2194 rdev->sb_loaded = 1; 2195 } 2196 } 2197 } 2198 2199 void md_update_sb(struct mddev *mddev, int force_change) 2200 { 2201 struct md_rdev *rdev; 2202 int sync_req; 2203 int nospares = 0; 2204 int any_badblocks_changed = 0; 2205 2206 if (mddev->ro) { 2207 if (force_change) 2208 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2209 return; 2210 } 2211 repeat: 2212 /* First make sure individual recovery_offsets are correct */ 2213 rdev_for_each(rdev, mddev) { 2214 if (rdev->raid_disk >= 0 && 2215 mddev->delta_disks >= 0 && 2216 !test_bit(In_sync, &rdev->flags) && 2217 mddev->curr_resync_completed > rdev->recovery_offset) 2218 rdev->recovery_offset = mddev->curr_resync_completed; 2219 2220 } 2221 if (!mddev->persistent) { 2222 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2223 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2224 if (!mddev->external) { 2225 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2226 rdev_for_each(rdev, mddev) { 2227 if (rdev->badblocks.changed) { 2228 rdev->badblocks.changed = 0; 2229 md_ack_all_badblocks(&rdev->badblocks); 2230 md_error(mddev, rdev); 2231 } 2232 clear_bit(Blocked, &rdev->flags); 2233 clear_bit(BlockedBadBlocks, &rdev->flags); 2234 wake_up(&rdev->blocked_wait); 2235 } 2236 } 2237 wake_up(&mddev->sb_wait); 2238 return; 2239 } 2240 2241 spin_lock(&mddev->lock); 2242 2243 mddev->utime = get_seconds(); 2244 2245 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2246 force_change = 1; 2247 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2248 /* just a clean<-> dirty transition, possibly leave spares 
alone, 2249 * though if events isn't the right even/odd, we will have to do 2250 * spares after all 2251 */ 2252 nospares = 1; 2253 if (force_change) 2254 nospares = 0; 2255 if (mddev->degraded) 2256 /* If the array is degraded, then skipping spares is both 2257 * dangerous and fairly pointless. 2258 * Dangerous because a device that was removed from the array 2259 * might have an event_count that still looks up-to-date, 2260 * so it can be re-added without a resync. 2261 * Pointless because if there are any spares to skip, 2262 * then a recovery will happen and soon that array won't 2263 * be degraded any more and the spare can go back to sleep then. 2264 */ 2265 nospares = 0; 2266 2267 sync_req = mddev->in_sync; 2268 2269 /* If this is just a dirty<->clean transition, and the array is clean 2270 * and 'events' is odd, we can roll back to the previous clean state */ 2271 if (nospares 2272 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2273 && mddev->can_decrease_events 2274 && mddev->events != 1) { 2275 mddev->events--; 2276 mddev->can_decrease_events = 0; 2277 } else { 2278 /* otherwise we have to go forward and ... */ 2279 mddev->events++; 2280 mddev->can_decrease_events = nospares; 2281 } 2282 2283 /* 2284 * This 64-bit counter should never wrap. 2285 * Either we are somewhere around the year ~1 trillion A.D., assuming 2286 * 1 reboot per second, or we have a bug... 2287 */ 2288 WARN_ON(mddev->events == 0); 2289 2290 rdev_for_each(rdev, mddev) { 2291 if (rdev->badblocks.changed) 2292 any_badblocks_changed++; 2293 if (test_bit(Faulty, &rdev->flags)) 2294 set_bit(FaultRecorded, &rdev->flags); 2295 } 2296 2297 sync_sbs(mddev, nospares); 2298 spin_unlock(&mddev->lock); 2299 2300 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2301 mdname(mddev), mddev->in_sync); 2302 2303 bitmap_update_sb(mddev->bitmap); 2304 rdev_for_each(rdev, mddev) { 2305 char b[BDEVNAME_SIZE]; 2306 2307 if (rdev->sb_loaded != 1) 2308 continue; /* no noise on spare devices */ 2309 2310 if (!test_bit(Faulty, &rdev->flags)) { 2311 md_super_write(mddev, rdev, 2312 rdev->sb_start, rdev->sb_size, 2313 rdev->sb_page); 2314 pr_debug("md: (write) %s's sb offset: %llu\n", 2315 bdevname(rdev->bdev, b), 2316 (unsigned long long)rdev->sb_start); 2317 rdev->sb_events = mddev->events; 2318 if (rdev->badblocks.size) { 2319 md_super_write(mddev, rdev, 2320 rdev->badblocks.sector, 2321 rdev->badblocks.size << 9, 2322 rdev->bb_page); 2323 rdev->badblocks.size = 0; 2324 } 2325 2326 } else 2327 pr_debug("md: %s (skipping faulty)\n", 2328 bdevname(rdev->bdev, b)); 2329 2330 if (mddev->level == LEVEL_MULTIPATH) 2331 /* only need to write one superblock...
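 * (editor's note: every MULTIPATH rdev is merely another path to the
 * same underlying device, so one write reaches the only copy of the
 * metadata)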
*/ 2332 break; 2333 } 2334 md_super_wait(mddev); 2335 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2336 2337 spin_lock(&mddev->lock); 2338 if (mddev->in_sync != sync_req || 2339 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2340 /* have to write it out again */ 2341 spin_unlock(&mddev->lock); 2342 goto repeat; 2343 } 2344 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2345 spin_unlock(&mddev->lock); 2346 wake_up(&mddev->sb_wait); 2347 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2348 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2349 2350 rdev_for_each(rdev, mddev) { 2351 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2352 clear_bit(Blocked, &rdev->flags); 2353 2354 if (any_badblocks_changed) 2355 md_ack_all_badblocks(&rdev->badblocks); 2356 clear_bit(BlockedBadBlocks, &rdev->flags); 2357 wake_up(&rdev->blocked_wait); 2358 } 2359 } 2360 EXPORT_SYMBOL(md_update_sb); 2361 2362 static int add_bound_rdev(struct md_rdev *rdev) 2363 { 2364 struct mddev *mddev = rdev->mddev; 2365 int err = 0; 2366 2367 if (!mddev->pers->hot_remove_disk) { 2368 /* If there is hot_add_disk but no hot_remove_disk 2369 * then disks are only added to accommodate geometry changes, 2370 * and should be made active immediately. 2371 */ 2372 super_types[mddev->major_version]. 2373 validate_super(mddev, rdev); 2374 err = mddev->pers->hot_add_disk(mddev, rdev); 2375 if (err) { 2376 unbind_rdev_from_array(rdev); 2377 export_rdev(rdev); 2378 return err; 2379 } 2380 } 2381 sysfs_notify_dirent_safe(rdev->sysfs_state); 2382 2383 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2384 if (mddev->degraded) 2385 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2386 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2387 md_new_event(mddev); 2388 md_wakeup_thread(mddev->thread); 2389 return 0; 2390 } 2391 2392 /* words written to sysfs files may, or may not, be \n terminated. 2393 * We want to accept either form. For this we use cmd_match. 2394 */ 2395 static int cmd_match(const char *cmd, const char *str) 2396 { 2397 /* See if cmd, written into a sysfs file, matches 2398 * str.
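 * A few illustrative cases (editor's sketch, not part of the
 * original source):
 *
 *	cmd_match("idle\n", "idle")  -> 1   trailing newline accepted
 *	cmd_match("idle",   "idle")  -> 1   exact match
 *	cmd_match("idler",  "idle")  -> 0   extra characters in cmd
 *	cmd_match("idl",    "idle")  -> 0   cmd is a strict prefix
 *
 * More precisely: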
They must either be the same, or cmd can 2399 * have a trailing newline 2400 */ 2401 while (*cmd && *str && *cmd == *str) { 2402 cmd++; 2403 str++; 2404 } 2405 if (*cmd == '\n') 2406 cmd++; 2407 if (*str || *cmd) 2408 return 0; 2409 return 1; 2410 } 2411 2412 struct rdev_sysfs_entry { 2413 struct attribute attr; 2414 ssize_t (*show)(struct md_rdev *, char *); 2415 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2416 }; 2417 2418 static ssize_t 2419 state_show(struct md_rdev *rdev, char *page) 2420 { 2421 char *sep = ""; 2422 size_t len = 0; 2423 unsigned long flags = ACCESS_ONCE(rdev->flags); 2424 2425 if (test_bit(Faulty, &flags) || 2426 rdev->badblocks.unacked_exist) { 2427 len+= sprintf(page+len, "%sfaulty",sep); 2428 sep = ","; 2429 } 2430 if (test_bit(In_sync, &flags)) { 2431 len += sprintf(page+len, "%sin_sync",sep); 2432 sep = ","; 2433 } 2434 if (test_bit(WriteMostly, &flags)) { 2435 len += sprintf(page+len, "%swrite_mostly",sep); 2436 sep = ","; 2437 } 2438 if (test_bit(Blocked, &flags) || 2439 (rdev->badblocks.unacked_exist 2440 && !test_bit(Faulty, &flags))) { 2441 len += sprintf(page+len, "%sblocked", sep); 2442 sep = ","; 2443 } 2444 if (!test_bit(Faulty, &flags) && 2445 !test_bit(In_sync, &flags)) { 2446 len += sprintf(page+len, "%sspare", sep); 2447 sep = ","; 2448 } 2449 if (test_bit(WriteErrorSeen, &flags)) { 2450 len += sprintf(page+len, "%swrite_error", sep); 2451 sep = ","; 2452 } 2453 if (test_bit(WantReplacement, &flags)) { 2454 len += sprintf(page+len, "%swant_replacement", sep); 2455 sep = ","; 2456 } 2457 if (test_bit(Replacement, &flags)) { 2458 len += sprintf(page+len, "%sreplacement", sep); 2459 sep = ","; 2460 } 2461 2462 return len+sprintf(page+len, "\n"); 2463 } 2464 2465 static ssize_t 2466 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2467 { 2468 /* can write 2469 * faulty - simulates an error 2470 * remove - disconnects the device 2471 * writemostly - sets write_mostly 2472 * -writemostly - clears write_mostly 2473 * blocked - sets the Blocked flags 2474 * -blocked - clears the Blocked and possibly simulates an error 2475 * insync - sets Insync providing device isn't active 2476 * -insync - clear Insync for a device with a slot assigned, 2477 * so that it gets rebuilt based on bitmap 2478 * write_error - sets WriteErrorSeen 2479 * -write_error - clears WriteErrorSeen 2480 */ 2481 int err = -EINVAL; 2482 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2483 md_error(rdev->mddev, rdev); 2484 if (test_bit(Faulty, &rdev->flags)) 2485 err = 0; 2486 else 2487 err = -EBUSY; 2488 } else if (cmd_match(buf, "remove")) { 2489 if (rdev->raid_disk >= 0) 2490 err = -EBUSY; 2491 else { 2492 struct mddev *mddev = rdev->mddev; 2493 if (mddev_is_clustered(mddev)) 2494 md_cluster_ops->remove_disk(mddev, rdev); 2495 md_kick_rdev_from_array(rdev); 2496 if (mddev_is_clustered(mddev)) 2497 md_cluster_ops->metadata_update_start(mddev); 2498 if (mddev->pers) 2499 md_update_sb(mddev, 1); 2500 md_new_event(mddev); 2501 if (mddev_is_clustered(mddev)) 2502 md_cluster_ops->metadata_update_finish(mddev); 2503 err = 0; 2504 } 2505 } else if (cmd_match(buf, "writemostly")) { 2506 set_bit(WriteMostly, &rdev->flags); 2507 err = 0; 2508 } else if (cmd_match(buf, "-writemostly")) { 2509 clear_bit(WriteMostly, &rdev->flags); 2510 err = 0; 2511 } else if (cmd_match(buf, "blocked")) { 2512 set_bit(Blocked, &rdev->flags); 2513 err = 0; 2514 } else if (cmd_match(buf, "-blocked")) { 2515 if (!test_bit(Faulty, &rdev->flags) && 2516 rdev->badblocks.unacked_exist) { 2517 
/* metadata handler doesn't understand badblocks, 2518 * so we need to fail the device 2519 */ 2520 md_error(rdev->mddev, rdev); 2521 } 2522 clear_bit(Blocked, &rdev->flags); 2523 clear_bit(BlockedBadBlocks, &rdev->flags); 2524 wake_up(&rdev->blocked_wait); 2525 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2526 md_wakeup_thread(rdev->mddev->thread); 2527 2528 err = 0; 2529 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2530 set_bit(In_sync, &rdev->flags); 2531 err = 0; 2532 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) { 2533 if (rdev->mddev->pers == NULL) { 2534 clear_bit(In_sync, &rdev->flags); 2535 rdev->saved_raid_disk = rdev->raid_disk; 2536 rdev->raid_disk = -1; 2537 err = 0; 2538 } 2539 } else if (cmd_match(buf, "write_error")) { 2540 set_bit(WriteErrorSeen, &rdev->flags); 2541 err = 0; 2542 } else if (cmd_match(buf, "-write_error")) { 2543 clear_bit(WriteErrorSeen, &rdev->flags); 2544 err = 0; 2545 } else if (cmd_match(buf, "want_replacement")) { 2546 /* Any non-spare device that is not a replacement can 2547 * become want_replacement at any time, but we then need to 2548 * check if recovery is needed. 2549 */ 2550 if (rdev->raid_disk >= 0 && 2551 !test_bit(Replacement, &rdev->flags)) 2552 set_bit(WantReplacement, &rdev->flags); 2553 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2554 md_wakeup_thread(rdev->mddev->thread); 2555 err = 0; 2556 } else if (cmd_match(buf, "-want_replacement")) { 2557 /* Clearing 'want_replacement' is always allowed. 2558 * Once replacement starts, it is too late though. 2559 */ 2560 err = 0; 2561 clear_bit(WantReplacement, &rdev->flags); 2562 } else if (cmd_match(buf, "replacement")) { 2563 /* Can only set a device as a replacement when array has not 2564 * yet been started. Once running, replacement is automatic 2565 * from spares, or by assigning 'slot'. 2566 */ 2567 if (rdev->mddev->pers) 2568 err = -EBUSY; 2569 else { 2570 set_bit(Replacement, &rdev->flags); 2571 err = 0; 2572 } 2573 } else if (cmd_match(buf, "-replacement")) { 2574 /* Similarly, can only clear Replacement before start */ 2575 if (rdev->mddev->pers) 2576 err = -EBUSY; 2577 else { 2578 clear_bit(Replacement, &rdev->flags); 2579 err = 0; 2580 } 2581 } else if (cmd_match(buf, "re-add")) { 2582 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { 2583 /* clear_bit is performed _after_ all the devices 2584 * have their local Faulty bit cleared. If any writes 2585 * happen in the meantime in the local node, they 2586 * will land in the local bitmap, which will be synced 2587 * by this node eventually 2588 */ 2589 if (!mddev_is_clustered(rdev->mddev) || 2590 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 2591 clear_bit(Faulty, &rdev->flags); 2592 err = add_bound_rdev(rdev); 2593 } 2594 } else 2595 err = -EBUSY; 2596 } 2597 if (!err) 2598 sysfs_notify_dirent_safe(rdev->sysfs_state); 2599 return err ?
err : len; 2600 } 2601 static struct rdev_sysfs_entry rdev_state = 2602 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 2603 2604 static ssize_t 2605 errors_show(struct md_rdev *rdev, char *page) 2606 { 2607 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2608 } 2609 2610 static ssize_t 2611 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 2612 { 2613 unsigned int n; 2614 int rv; 2615 2616 rv = kstrtouint(buf, 10, &n); 2617 if (rv < 0) 2618 return rv; 2619 atomic_set(&rdev->corrected_errors, n); 2620 return len; 2621 } 2622 static struct rdev_sysfs_entry rdev_errors = 2623 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2624 2625 static ssize_t 2626 slot_show(struct md_rdev *rdev, char *page) 2627 { 2628 if (rdev->raid_disk < 0) 2629 return sprintf(page, "none\n"); 2630 else 2631 return sprintf(page, "%d\n", rdev->raid_disk); 2632 } 2633 2634 static ssize_t 2635 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 2636 { 2637 int slot; 2638 int err; 2639 2640 if (strncmp(buf, "none", 4)==0) 2641 slot = -1; 2642 else { 2643 err = kstrtouint(buf, 10, (unsigned int *)&slot); 2644 if (err < 0) 2645 return err; 2646 } 2647 if (rdev->mddev->pers && slot == -1) { 2648 /* Setting 'slot' on an active array requires also 2649 * updating the 'rd%d' link, and communicating 2650 * with the personality with ->hot_*_disk. 2651 * For now we only support removing 2652 * failed/spare devices. This normally happens automatically, 2653 * but not when the metadata is externally managed. 2654 */ 2655 if (rdev->raid_disk == -1) 2656 return -EEXIST; 2657 /* personality does all needed checks */ 2658 if (rdev->mddev->pers->hot_remove_disk == NULL) 2659 return -EINVAL; 2660 clear_bit(Blocked, &rdev->flags); 2661 remove_and_add_spares(rdev->mddev, rdev); 2662 if (rdev->raid_disk >= 0) 2663 return -EBUSY; 2664 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2665 md_wakeup_thread(rdev->mddev->thread); 2666 } else if (rdev->mddev->pers) { 2667 /* Activating a spare .. or possibly reactivating 2668 * if we ever get bitmaps working here. 2669 */ 2670 2671 if (rdev->raid_disk != -1) 2672 return -EBUSY; 2673 2674 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 2675 return -EBUSY; 2676 2677 if (rdev->mddev->pers->hot_add_disk == NULL) 2678 return -EINVAL; 2679 2680 if (slot >= rdev->mddev->raid_disks && 2681 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2682 return -ENOSPC; 2683 2684 rdev->raid_disk = slot; 2685 if (test_bit(In_sync, &rdev->flags)) 2686 rdev->saved_raid_disk = slot; 2687 else 2688 rdev->saved_raid_disk = -1; 2689 clear_bit(In_sync, &rdev->flags); 2690 clear_bit(Bitmap_sync, &rdev->flags); 2691 err = rdev->mddev->pers-> 2692 hot_add_disk(rdev->mddev, rdev); 2693 if (err) { 2694 rdev->raid_disk = -1; 2695 return err; 2696 } else 2697 sysfs_notify_dirent_safe(rdev->sysfs_state); 2698 if (sysfs_link_rdev(rdev->mddev, rdev)) 2699 /* failure here is OK */; 2700 /* don't wakeup anyone, leave that to userspace. 
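 * (editor's note: unlike the removal branch above, this path
 * deliberately does not call md_wakeup_thread(), so userspace decides
 * when recovery actually begins)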
*/ 2701 } else { 2702 if (slot >= rdev->mddev->raid_disks && 2703 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2704 return -ENOSPC; 2705 rdev->raid_disk = slot; 2706 /* assume it is working */ 2707 clear_bit(Faulty, &rdev->flags); 2708 clear_bit(WriteMostly, &rdev->flags); 2709 set_bit(In_sync, &rdev->flags); 2710 sysfs_notify_dirent_safe(rdev->sysfs_state); 2711 } 2712 return len; 2713 } 2714 2715 static struct rdev_sysfs_entry rdev_slot = 2716 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2717 2718 static ssize_t 2719 offset_show(struct md_rdev *rdev, char *page) 2720 { 2721 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2722 } 2723 2724 static ssize_t 2725 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 2726 { 2727 unsigned long long offset; 2728 if (kstrtoull(buf, 10, &offset) < 0) 2729 return -EINVAL; 2730 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2731 return -EBUSY; 2732 if (rdev->sectors && rdev->mddev->external) 2733 /* Must set offset before size, so overlap checks 2734 * can be sane */ 2735 return -EBUSY; 2736 rdev->data_offset = offset; 2737 rdev->new_data_offset = offset; 2738 return len; 2739 } 2740 2741 static struct rdev_sysfs_entry rdev_offset = 2742 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2743 2744 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 2745 { 2746 return sprintf(page, "%llu\n", 2747 (unsigned long long)rdev->new_data_offset); 2748 } 2749 2750 static ssize_t new_offset_store(struct md_rdev *rdev, 2751 const char *buf, size_t len) 2752 { 2753 unsigned long long new_offset; 2754 struct mddev *mddev = rdev->mddev; 2755 2756 if (kstrtoull(buf, 10, &new_offset) < 0) 2757 return -EINVAL; 2758 2759 if (mddev->sync_thread || 2760 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 2761 return -EBUSY; 2762 if (new_offset == rdev->data_offset) 2763 /* reset is always permitted */ 2764 ; 2765 else if (new_offset > rdev->data_offset) { 2766 /* must not push array size beyond rdev_sectors */ 2767 if (new_offset - rdev->data_offset 2768 + mddev->dev_sectors > rdev->sectors) 2769 return -E2BIG; 2770 } 2771 /* Metadata worries about other space details. */ 2772 2773 /* decreasing the offset is inconsistent with a backwards 2774 * reshape. 2775 */ 2776 if (new_offset < rdev->data_offset && 2777 mddev->reshape_backwards) 2778 return -EINVAL; 2779 /* Increasing offset is inconsistent with forwards 2780 * reshape. reshape_direction should be set to 2781 * 'backwards' first. 
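 * (editor's note: reshape_direction is the md sysfs attribute of that
 * name; writing "backwards" to it sets mddev->reshape_backwards)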
2782 */ 2783 if (new_offset > rdev->data_offset && 2784 !mddev->reshape_backwards) 2785 return -EINVAL; 2786 2787 if (mddev->pers && mddev->persistent && 2788 !super_types[mddev->major_version] 2789 .allow_new_offset(rdev, new_offset)) 2790 return -E2BIG; 2791 rdev->new_data_offset = new_offset; 2792 if (new_offset > rdev->data_offset) 2793 mddev->reshape_backwards = 1; 2794 else if (new_offset < rdev->data_offset) 2795 mddev->reshape_backwards = 0; 2796 2797 return len; 2798 } 2799 static struct rdev_sysfs_entry rdev_new_offset = 2800 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 2801 2802 static ssize_t 2803 rdev_size_show(struct md_rdev *rdev, char *page) 2804 { 2805 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2806 } 2807 2808 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2809 { 2810 /* check if two start/length pairs overlap */ 2811 if (s1+l1 <= s2) 2812 return 0; 2813 if (s2+l2 <= s1) 2814 return 0; 2815 return 1; 2816 } 2817 2818 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2819 { 2820 unsigned long long blocks; 2821 sector_t new; 2822 2823 if (kstrtoull(buf, 10, &blocks) < 0) 2824 return -EINVAL; 2825 2826 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2827 return -EINVAL; /* sector conversion overflow */ 2828 2829 new = blocks * 2; 2830 if (new != blocks * 2) 2831 return -EINVAL; /* unsigned long long to sector_t overflow */ 2832 2833 *sectors = new; 2834 return 0; 2835 } 2836 2837 static ssize_t 2838 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 2839 { 2840 struct mddev *my_mddev = rdev->mddev; 2841 sector_t oldsectors = rdev->sectors; 2842 sector_t sectors; 2843 2844 if (strict_blocks_to_sectors(buf, &sectors) < 0) 2845 return -EINVAL; 2846 if (rdev->data_offset != rdev->new_data_offset) 2847 return -EINVAL; /* too confusing */ 2848 if (my_mddev->pers && rdev->raid_disk >= 0) { 2849 if (my_mddev->persistent) { 2850 sectors = super_types[my_mddev->major_version]. 2851 rdev_size_change(rdev, sectors); 2852 if (!sectors) 2853 return -EBUSY; 2854 } else if (!sectors) 2855 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 2856 rdev->data_offset; 2857 if (!my_mddev->pers->resize) 2858 /* Cannot change size for RAID0 or Linear etc */ 2859 return -EINVAL; 2860 } 2861 if (sectors < my_mddev->dev_sectors) 2862 return -EINVAL; /* component must fit device */ 2863 2864 rdev->sectors = sectors; 2865 if (sectors > oldsectors && my_mddev->external) { 2866 /* Need to check that all other rdevs with the same 2867 * ->bdev do not overlap. 'rcu' is sufficient to walk 2868 * the rdev lists safely. 2869 * This check does not provide a hard guarantee, it 2870 * just helps avoid dangerous mistakes. 2871 */ 2872 struct mddev *mddev; 2873 int overlap = 0; 2874 struct list_head *tmp; 2875 2876 rcu_read_lock(); 2877 for_each_mddev(mddev, tmp) { 2878 struct md_rdev *rdev2; 2879 2880 rdev_for_each(rdev2, mddev) 2881 if (rdev->bdev == rdev2->bdev && 2882 rdev != rdev2 && 2883 overlaps(rdev->data_offset, rdev->sectors, 2884 rdev2->data_offset, 2885 rdev2->sectors)) { 2886 overlap = 1; 2887 break; 2888 } 2889 if (overlap) { 2890 mddev_put(mddev); 2891 break; 2892 } 2893 } 2894 rcu_read_unlock(); 2895 if (overlap) { 2896 /* Someone else could have slipped in a size 2897 * change here, but doing so is just silly.
2898 * We put oldsectors back because we *know* it is 2899 * safe, and trust userspace not to race with 2900 * itself 2901 */ 2902 rdev->sectors = oldsectors; 2903 return -EBUSY; 2904 } 2905 } 2906 return len; 2907 } 2908 2909 static struct rdev_sysfs_entry rdev_size = 2910 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2911 2912 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 2913 { 2914 unsigned long long recovery_start = rdev->recovery_offset; 2915 2916 if (test_bit(In_sync, &rdev->flags) || 2917 recovery_start == MaxSector) 2918 return sprintf(page, "none\n"); 2919 2920 return sprintf(page, "%llu\n", recovery_start); 2921 } 2922 2923 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 2924 { 2925 unsigned long long recovery_start; 2926 2927 if (cmd_match(buf, "none")) 2928 recovery_start = MaxSector; 2929 else if (kstrtoull(buf, 10, &recovery_start)) 2930 return -EINVAL; 2931 2932 if (rdev->mddev->pers && 2933 rdev->raid_disk >= 0) 2934 return -EBUSY; 2935 2936 rdev->recovery_offset = recovery_start; 2937 if (recovery_start == MaxSector) 2938 set_bit(In_sync, &rdev->flags); 2939 else 2940 clear_bit(In_sync, &rdev->flags); 2941 return len; 2942 } 2943 2944 static struct rdev_sysfs_entry rdev_recovery_start = 2945 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2946 2947 static ssize_t 2948 badblocks_show(struct badblocks *bb, char *page, int unack); 2949 static ssize_t 2950 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); 2951 2952 static ssize_t bb_show(struct md_rdev *rdev, char *page) 2953 { 2954 return badblocks_show(&rdev->badblocks, page, 0); 2955 } 2956 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 2957 { 2958 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 2959 /* Maybe that ack was all we needed */ 2960 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 2961 wake_up(&rdev->blocked_wait); 2962 return rv; 2963 } 2964 static struct rdev_sysfs_entry rdev_bad_blocks = 2965 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 2966 2967 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 2968 { 2969 return badblocks_show(&rdev->badblocks, page, 1); 2970 } 2971 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 2972 { 2973 return badblocks_store(&rdev->badblocks, page, len, 1); 2974 } 2975 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 2976 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 2977 2978 static struct attribute *rdev_default_attrs[] = { 2979 &rdev_state.attr, 2980 &rdev_errors.attr, 2981 &rdev_slot.attr, 2982 &rdev_offset.attr, 2983 &rdev_new_offset.attr, 2984 &rdev_size.attr, 2985 &rdev_recovery_start.attr, 2986 &rdev_bad_blocks.attr, 2987 &rdev_unack_bad_blocks.attr, 2988 NULL, 2989 }; 2990 static ssize_t 2991 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2992 { 2993 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2994 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 2995 2996 if (!entry->show) 2997 return -EIO; 2998 if (!rdev->mddev) 2999 return -EBUSY; 3000 return entry->show(rdev, page); 3001 } 3002 3003 static ssize_t 3004 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3005 const char *page, size_t length) 3006 { 3007 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3008 struct md_rdev *rdev = 
container_of(kobj, struct md_rdev, kobj); 3009 ssize_t rv; 3010 struct mddev *mddev = rdev->mddev; 3011 3012 if (!entry->store) 3013 return -EIO; 3014 if (!capable(CAP_SYS_ADMIN)) 3015 return -EACCES; 3016 rv = mddev ? mddev_lock(mddev): -EBUSY; 3017 if (!rv) { 3018 if (rdev->mddev == NULL) 3019 rv = -EBUSY; 3020 else 3021 rv = entry->store(rdev, page, length); 3022 mddev_unlock(mddev); 3023 } 3024 return rv; 3025 } 3026 3027 static void rdev_free(struct kobject *ko) 3028 { 3029 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3030 kfree(rdev); 3031 } 3032 static const struct sysfs_ops rdev_sysfs_ops = { 3033 .show = rdev_attr_show, 3034 .store = rdev_attr_store, 3035 }; 3036 static struct kobj_type rdev_ktype = { 3037 .release = rdev_free, 3038 .sysfs_ops = &rdev_sysfs_ops, 3039 .default_attrs = rdev_default_attrs, 3040 }; 3041 3042 int md_rdev_init(struct md_rdev *rdev) 3043 { 3044 rdev->desc_nr = -1; 3045 rdev->saved_raid_disk = -1; 3046 rdev->raid_disk = -1; 3047 rdev->flags = 0; 3048 rdev->data_offset = 0; 3049 rdev->new_data_offset = 0; 3050 rdev->sb_events = 0; 3051 rdev->last_read_error.tv_sec = 0; 3052 rdev->last_read_error.tv_nsec = 0; 3053 rdev->sb_loaded = 0; 3054 rdev->bb_page = NULL; 3055 atomic_set(&rdev->nr_pending, 0); 3056 atomic_set(&rdev->read_errors, 0); 3057 atomic_set(&rdev->corrected_errors, 0); 3058 3059 INIT_LIST_HEAD(&rdev->same_set); 3060 init_waitqueue_head(&rdev->blocked_wait); 3061 3062 /* Add space to store bad block list. 3063 * This reserves the space even on arrays where it cannot 3064 * be used - I wonder if that matters 3065 */ 3066 rdev->badblocks.count = 0; 3067 rdev->badblocks.shift = -1; /* disabled until explicitly enabled */ 3068 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); 3069 seqlock_init(&rdev->badblocks.lock); 3070 if (rdev->badblocks.page == NULL) 3071 return -ENOMEM; 3072 3073 return 0; 3074 } 3075 EXPORT_SYMBOL_GPL(md_rdev_init); 3076 /* 3077 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3078 * 3079 * mark the device faulty if: 3080 * 3081 * - the device is nonexistent (zero size) 3082 * - the device has no valid superblock 3083 * 3084 * a faulty rdev _never_ has rdev->sb set. 3085 */ 3086 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3087 { 3088 char b[BDEVNAME_SIZE]; 3089 int err; 3090 struct md_rdev *rdev; 3091 sector_t size; 3092 3093 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3094 if (!rdev) { 3095 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 3096 return ERR_PTR(-ENOMEM); 3097 } 3098 3099 err = md_rdev_init(rdev); 3100 if (err) 3101 goto abort_free; 3102 err = alloc_disk_sb(rdev); 3103 if (err) 3104 goto abort_free; 3105 3106 err = lock_rdev(rdev, newdev, super_format == -2); 3107 if (err) 3108 goto abort_free; 3109 3110 kobject_init(&rdev->kobj, &rdev_ktype); 3111 3112 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3113 if (!size) { 3114 printk(KERN_WARNING 3115 "md: %s has zero or unknown size, marking faulty!\n", 3116 bdevname(rdev->bdev,b)); 3117 err = -EINVAL; 3118 goto abort_free; 3119 } 3120 3121 if (super_format >= 0) { 3122 err = super_types[super_format]. 
3123 load_super(rdev, NULL, super_minor); 3124 if (err == -EINVAL) { 3125 printk(KERN_WARNING 3126 "md: %s does not have a valid v%d.%d " 3127 "superblock, not importing!\n", 3128 bdevname(rdev->bdev,b), 3129 super_format, super_minor); 3130 goto abort_free; 3131 } 3132 if (err < 0) { 3133 printk(KERN_WARNING 3134 "md: could not read %s's sb, not importing!\n", 3135 bdevname(rdev->bdev,b)); 3136 goto abort_free; 3137 } 3138 } 3139 3140 return rdev; 3141 3142 abort_free: 3143 if (rdev->bdev) 3144 unlock_rdev(rdev); 3145 md_rdev_clear(rdev); 3146 kfree(rdev); 3147 return ERR_PTR(err); 3148 } 3149 3150 /* 3151 * Check a full RAID array for plausibility 3152 */ 3153 3154 static void analyze_sbs(struct mddev *mddev) 3155 { 3156 int i; 3157 struct md_rdev *rdev, *freshest, *tmp; 3158 char b[BDEVNAME_SIZE]; 3159 3160 freshest = NULL; 3161 rdev_for_each_safe(rdev, tmp, mddev) 3162 switch (super_types[mddev->major_version]. 3163 load_super(rdev, freshest, mddev->minor_version)) { 3164 case 1: 3165 freshest = rdev; 3166 break; 3167 case 0: 3168 break; 3169 default: 3170 printk(KERN_ERR 3171 "md: fatal superblock inconsistency in %s" 3172 " -- removing from array\n", 3173 bdevname(rdev->bdev,b)); 3174 md_kick_rdev_from_array(rdev); 3175 } 3176 3177 super_types[mddev->major_version]. 3178 validate_super(mddev, freshest); 3179 3180 i = 0; 3181 rdev_for_each_safe(rdev, tmp, mddev) { 3182 if (mddev->max_disks && 3183 (rdev->desc_nr >= mddev->max_disks || 3184 i > mddev->max_disks)) { 3185 printk(KERN_WARNING 3186 "md: %s: %s: only %d devices permitted\n", 3187 mdname(mddev), bdevname(rdev->bdev, b), 3188 mddev->max_disks); 3189 md_kick_rdev_from_array(rdev); 3190 continue; 3191 } 3192 if (rdev != freshest) { 3193 if (super_types[mddev->major_version]. 3194 validate_super(mddev, rdev)) { 3195 printk(KERN_WARNING "md: kicking non-fresh %s" 3196 " from array!\n", 3197 bdevname(rdev->bdev,b)); 3198 md_kick_rdev_from_array(rdev); 3199 continue; 3200 } 3201 /* No device should have a Candidate flag 3202 * when reading devices 3203 */ 3204 if (test_bit(Candidate, &rdev->flags)) { 3205 pr_info("md: kicking Cluster Candidate %s from array!\n", 3206 bdevname(rdev->bdev, b)); 3207 md_kick_rdev_from_array(rdev); 3208 } 3209 } 3210 if (mddev->level == LEVEL_MULTIPATH) { 3211 rdev->desc_nr = i++; 3212 rdev->raid_disk = rdev->desc_nr; 3213 set_bit(In_sync, &rdev->flags); 3214 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { 3215 rdev->raid_disk = -1; 3216 clear_bit(In_sync, &rdev->flags); 3217 } 3218 } 3219 } 3220 3221 /* Read a fixed-point number. 3222 * Numbers in sysfs attributes should be in "standard" units where 3223 * possible, so time should be in seconds. 3224 * However we internally use a much smaller unit such as 3225 * milliseconds or jiffies. 3226 * This function takes a decimal number with a possible fractional 3227 * component, and produces an integer which is the result of 3228 * multiplying that number by 10^'scale', 3229 * all without any floating-point arithmetic. 3230 */ 3231 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 3232 { 3233 unsigned long result = 0; 3234 long decimals = -1; 3235 while (isdigit(*cp) || (*cp == '.'
&& decimals < 0)) { 3236 if (*cp == '.') 3237 decimals = 0; 3238 else if (decimals < scale) { 3239 unsigned int value; 3240 value = *cp - '0'; 3241 result = result * 10 + value; 3242 if (decimals >= 0) 3243 decimals++; 3244 } 3245 cp++; 3246 } 3247 if (*cp == '\n') 3248 cp++; 3249 if (*cp) 3250 return -EINVAL; 3251 if (decimals < 0) 3252 decimals = 0; 3253 while (decimals < scale) { 3254 result *= 10; 3255 decimals++; 3256 } 3257 *res = result; 3258 return 0; 3259 } 3260 3261 static ssize_t 3262 safe_delay_show(struct mddev *mddev, char *page) 3263 { 3264 int msec = (mddev->safemode_delay*1000)/HZ; 3265 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3266 } 3267 static ssize_t 3268 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3269 { 3270 unsigned long msec; 3271 3272 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3273 return -EINVAL; 3274 if (msec == 0) 3275 mddev->safemode_delay = 0; 3276 else { 3277 unsigned long old_delay = mddev->safemode_delay; 3278 unsigned long new_delay = (msec*HZ)/1000; 3279 3280 if (new_delay == 0) 3281 new_delay = 1; 3282 mddev->safemode_delay = new_delay; 3283 if (new_delay < old_delay || old_delay == 0) 3284 mod_timer(&mddev->safemode_timer, jiffies+1); 3285 } 3286 return len; 3287 } 3288 static struct md_sysfs_entry md_safe_delay = 3289 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store); 3290 3291 static ssize_t 3292 level_show(struct mddev *mddev, char *page) 3293 { 3294 struct md_personality *p; 3295 int ret; 3296 spin_lock(&mddev->lock); 3297 p = mddev->pers; 3298 if (p) 3299 ret = sprintf(page, "%s\n", p->name); 3300 else if (mddev->clevel[0]) 3301 ret = sprintf(page, "%s\n", mddev->clevel); 3302 else if (mddev->level != LEVEL_NONE) 3303 ret = sprintf(page, "%d\n", mddev->level); 3304 else 3305 ret = 0; 3306 spin_unlock(&mddev->lock); 3307 return ret; 3308 } 3309 3310 static ssize_t 3311 level_store(struct mddev *mddev, const char *buf, size_t len) 3312 { 3313 char clevel[16]; 3314 ssize_t rv; 3315 size_t slen = len; 3316 struct md_personality *pers, *oldpers; 3317 long level; 3318 void *priv, *oldpriv; 3319 struct md_rdev *rdev; 3320 3321 if (slen == 0 || slen >= sizeof(clevel)) 3322 return -EINVAL; 3323 3324 rv = mddev_lock(mddev); 3325 if (rv) 3326 return rv; 3327 3328 if (mddev->pers == NULL) { 3329 strncpy(mddev->clevel, buf, slen); 3330 if (mddev->clevel[slen-1] == '\n') 3331 slen--; 3332 mddev->clevel[slen] = 0; 3333 mddev->level = LEVEL_NONE; 3334 rv = len; 3335 goto out_unlock; 3336 } 3337 rv = -EROFS; 3338 if (mddev->ro) 3339 goto out_unlock; 3340 3341 /* request to change the personality. Need to ensure: 3342 * - array is not engaged in resync/recovery/reshape 3343 * - old personality can be suspended 3344 * - new personality can successfully take over the array.
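 * (editor's illustration: writing "raid6" to md/level on a running
 * RAID5 array invokes the raid6 personality's ->takeover of raid5;
 * a personality without a takeover method is rejected below)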
3345 */ 3346 3347 rv = -EBUSY; 3348 if (mddev->sync_thread || 3349 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3350 mddev->reshape_position != MaxSector || 3351 mddev->sysfs_active) 3352 goto out_unlock; 3353 3354 rv = -EINVAL; 3355 if (!mddev->pers->quiesce) { 3356 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3357 mdname(mddev), mddev->pers->name); 3358 goto out_unlock; 3359 } 3360 3361 /* Now find the new personality */ 3362 strncpy(clevel, buf, slen); 3363 if (clevel[slen-1] == '\n') 3364 slen--; 3365 clevel[slen] = 0; 3366 if (kstrtol(clevel, 10, &level)) 3367 level = LEVEL_NONE; 3368 3369 if (request_module("md-%s", clevel) != 0) 3370 request_module("md-level-%s", clevel); 3371 spin_lock(&pers_lock); 3372 pers = find_pers(level, clevel); 3373 if (!pers || !try_module_get(pers->owner)) { 3374 spin_unlock(&pers_lock); 3375 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3376 rv = -EINVAL; 3377 goto out_unlock; 3378 } 3379 spin_unlock(&pers_lock); 3380 3381 if (pers == mddev->pers) { 3382 /* Nothing to do! */ 3383 module_put(pers->owner); 3384 rv = len; 3385 goto out_unlock; 3386 } 3387 if (!pers->takeover) { 3388 module_put(pers->owner); 3389 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3390 mdname(mddev), clevel); 3391 rv = -EINVAL; 3392 goto out_unlock; 3393 } 3394 3395 rdev_for_each(rdev, mddev) 3396 rdev->new_raid_disk = rdev->raid_disk; 3397 3398 /* ->takeover must set new_* and/or delta_disks 3399 * if it succeeds, and may set them when it fails. 3400 */ 3401 priv = pers->takeover(mddev); 3402 if (IS_ERR(priv)) { 3403 mddev->new_level = mddev->level; 3404 mddev->new_layout = mddev->layout; 3405 mddev->new_chunk_sectors = mddev->chunk_sectors; 3406 mddev->raid_disks -= mddev->delta_disks; 3407 mddev->delta_disks = 0; 3408 mddev->reshape_backwards = 0; 3409 module_put(pers->owner); 3410 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3411 mdname(mddev), clevel); 3412 rv = PTR_ERR(priv); 3413 goto out_unlock; 3414 } 3415 3416 /* Looks like we have a winner */ 3417 mddev_suspend(mddev); 3418 mddev_detach(mddev); 3419 3420 spin_lock(&mddev->lock); 3421 oldpers = mddev->pers; 3422 oldpriv = mddev->private; 3423 mddev->pers = pers; 3424 mddev->private = priv; 3425 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3426 mddev->level = mddev->new_level; 3427 mddev->layout = mddev->new_layout; 3428 mddev->chunk_sectors = mddev->new_chunk_sectors; 3429 mddev->delta_disks = 0; 3430 mddev->reshape_backwards = 0; 3431 mddev->degraded = 0; 3432 spin_unlock(&mddev->lock); 3433 3434 if (oldpers->sync_request == NULL && 3435 mddev->external) { 3436 /* We are converting from a no-redundancy array 3437 * to a redundancy array and metadata is managed 3438 * externally so we need to be sure that writes 3439 * won't block due to a need to transition 3440 * clean->dirty 3441 * until external management is started. 
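 * (editor's note: otherwise writers would block in md_write_start()
 * waiting for a metadata update that no external agent is yet
 * running to acknowledge)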
3442 */ 3443 mddev->in_sync = 0; 3444 mddev->safemode_delay = 0; 3445 mddev->safemode = 0; 3446 } 3447 3448 oldpers->free(mddev, oldpriv); 3449 3450 if (oldpers->sync_request == NULL && 3451 pers->sync_request != NULL) { 3452 /* need to add the md_redundancy_group */ 3453 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3454 printk(KERN_WARNING 3455 "md: cannot register extra attributes for %s\n", 3456 mdname(mddev)); 3457 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 3458 } 3459 if (oldpers->sync_request != NULL && 3460 pers->sync_request == NULL) { 3461 /* need to remove the md_redundancy_group */ 3462 if (mddev->to_remove == NULL) 3463 mddev->to_remove = &md_redundancy_group; 3464 } 3465 3466 rdev_for_each(rdev, mddev) { 3467 if (rdev->raid_disk < 0) 3468 continue; 3469 if (rdev->new_raid_disk >= mddev->raid_disks) 3470 rdev->new_raid_disk = -1; 3471 if (rdev->new_raid_disk == rdev->raid_disk) 3472 continue; 3473 sysfs_unlink_rdev(mddev, rdev); 3474 } 3475 rdev_for_each(rdev, mddev) { 3476 if (rdev->raid_disk < 0) 3477 continue; 3478 if (rdev->new_raid_disk == rdev->raid_disk) 3479 continue; 3480 rdev->raid_disk = rdev->new_raid_disk; 3481 if (rdev->raid_disk < 0) 3482 clear_bit(In_sync, &rdev->flags); 3483 else { 3484 if (sysfs_link_rdev(mddev, rdev)) 3485 printk(KERN_WARNING "md: cannot register rd%d" 3486 " for %s after level change\n", 3487 rdev->raid_disk, mdname(mddev)); 3488 } 3489 } 3490 3491 if (pers->sync_request == NULL) { 3492 /* this is now an array without redundancy, so 3493 * it must always be in_sync 3494 */ 3495 mddev->in_sync = 1; 3496 del_timer_sync(&mddev->safemode_timer); 3497 } 3498 blk_set_stacking_limits(&mddev->queue->limits); 3499 pers->run(mddev); 3500 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3501 mddev_resume(mddev); 3502 if (!mddev->thread) 3503 md_update_sb(mddev, 1); 3504 sysfs_notify(&mddev->kobj, NULL, "level"); 3505 md_new_event(mddev); 3506 rv = len; 3507 out_unlock: 3508 mddev_unlock(mddev); 3509 return rv; 3510 } 3511 3512 static struct md_sysfs_entry md_level = 3513 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3514 3515 static ssize_t 3516 layout_show(struct mddev *mddev, char *page) 3517 { 3518 /* just a number, not meaningful for all levels */ 3519 if (mddev->reshape_position != MaxSector && 3520 mddev->layout != mddev->new_layout) 3521 return sprintf(page, "%d (%d)\n", 3522 mddev->new_layout, mddev->layout); 3523 return sprintf(page, "%d\n", mddev->layout); 3524 } 3525 3526 static ssize_t 3527 layout_store(struct mddev *mddev, const char *buf, size_t len) 3528 { 3529 unsigned int n; 3530 int err; 3531 3532 err = kstrtouint(buf, 10, &n); 3533 if (err < 0) 3534 return err; 3535 err = mddev_lock(mddev); 3536 if (err) 3537 return err; 3538 3539 if (mddev->pers) { 3540 if (mddev->pers->check_reshape == NULL) 3541 err = -EBUSY; 3542 else if (mddev->ro) 3543 err = -EROFS; 3544 else { 3545 mddev->new_layout = n; 3546 err = mddev->pers->check_reshape(mddev); 3547 if (err) 3548 mddev->new_layout = mddev->layout; 3549 } 3550 } else { 3551 mddev->new_layout = n; 3552 if (mddev->reshape_position == MaxSector) 3553 mddev->layout = n; 3554 } 3555 mddev_unlock(mddev); 3556 return err ?: len; 3557 } 3558 static struct md_sysfs_entry md_layout = 3559 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3560 3561 static ssize_t 3562 raid_disks_show(struct mddev *mddev, char *page) 3563 { 3564 if (mddev->raid_disks == 0) 3565 return 0; 3566 if (mddev->reshape_position != MaxSector && 3567 mddev->delta_disks != 
0) 3568 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3569 mddev->raid_disks - mddev->delta_disks); 3570 return sprintf(page, "%d\n", mddev->raid_disks); 3571 } 3572 3573 static int update_raid_disks(struct mddev *mddev, int raid_disks); 3574 3575 static ssize_t 3576 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3577 { 3578 unsigned int n; 3579 int err; 3580 3581 err = kstrtouint(buf, 10, &n); 3582 if (err < 0) 3583 return err; 3584 3585 err = mddev_lock(mddev); 3586 if (err) 3587 return err; 3588 if (mddev->pers) 3589 err = update_raid_disks(mddev, n); 3590 else if (mddev->reshape_position != MaxSector) { 3591 struct md_rdev *rdev; 3592 int olddisks = mddev->raid_disks - mddev->delta_disks; 3593 3594 err = -EINVAL; 3595 rdev_for_each(rdev, mddev) { 3596 if (olddisks < n && 3597 rdev->data_offset < rdev->new_data_offset) 3598 goto out_unlock; 3599 if (olddisks > n && 3600 rdev->data_offset > rdev->new_data_offset) 3601 goto out_unlock; 3602 } 3603 err = 0; 3604 mddev->delta_disks = n - olddisks; 3605 mddev->raid_disks = n; 3606 mddev->reshape_backwards = (mddev->delta_disks < 0); 3607 } else 3608 mddev->raid_disks = n; 3609 out_unlock: 3610 mddev_unlock(mddev); 3611 return err ? err : len; 3612 } 3613 static struct md_sysfs_entry md_raid_disks = 3614 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3615 3616 static ssize_t 3617 chunk_size_show(struct mddev *mddev, char *page) 3618 { 3619 if (mddev->reshape_position != MaxSector && 3620 mddev->chunk_sectors != mddev->new_chunk_sectors) 3621 return sprintf(page, "%d (%d)\n", 3622 mddev->new_chunk_sectors << 9, 3623 mddev->chunk_sectors << 9); 3624 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3625 } 3626 3627 static ssize_t 3628 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 3629 { 3630 unsigned long n; 3631 int err; 3632 3633 err = kstrtoul(buf, 10, &n); 3634 if (err < 0) 3635 return err; 3636 3637 err = mddev_lock(mddev); 3638 if (err) 3639 return err; 3640 if (mddev->pers) { 3641 if (mddev->pers->check_reshape == NULL) 3642 err = -EBUSY; 3643 else if (mddev->ro) 3644 err = -EROFS; 3645 else { 3646 mddev->new_chunk_sectors = n >> 9; 3647 err = mddev->pers->check_reshape(mddev); 3648 if (err) 3649 mddev->new_chunk_sectors = mddev->chunk_sectors; 3650 } 3651 } else { 3652 mddev->new_chunk_sectors = n >> 9; 3653 if (mddev->reshape_position == MaxSector) 3654 mddev->chunk_sectors = n >> 9; 3655 } 3656 mddev_unlock(mddev); 3657 return err ?: len; 3658 } 3659 static struct md_sysfs_entry md_chunk_size = 3660 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3661 3662 static ssize_t 3663 resync_start_show(struct mddev *mddev, char *page) 3664 { 3665 if (mddev->recovery_cp == MaxSector) 3666 return sprintf(page, "none\n"); 3667 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 3668 } 3669 3670 static ssize_t 3671 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 3672 { 3673 unsigned long long n; 3674 int err; 3675 3676 if (cmd_match(buf, "none")) 3677 n = MaxSector; 3678 else { 3679 err = kstrtoull(buf, 10, &n); 3680 if (err < 0) 3681 return err; 3682 if (n != (sector_t)n) 3683 return -EINVAL; 3684 } 3685 3686 err = mddev_lock(mddev); 3687 if (err) 3688 return err; 3689 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3690 err = -EBUSY; 3691 3692 if (!err) { 3693 mddev->recovery_cp = n; 3694 if (mddev->pers) 3695 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3696 } 3697 mddev_unlock(mddev); 
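	/* "err ?: len" is the GCC conditional with an omitted middle
	 * operand: return the error if one occurred, otherwise the full
	 * write length so sysfs treats the store as complete (editor's
	 * note -- the idiom recurs throughout this file).
	 */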
3698 return err ?: len; 3699 } 3700 static struct md_sysfs_entry md_resync_start = 3701 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 3702 resync_start_show, resync_start_store); 3703 3704 /* 3705 * The array state can be: 3706 * 3707 * clear 3708 * No devices, no size, no level 3709 * Equivalent to STOP_ARRAY ioctl 3710 * inactive 3711 * May have some settings, but array is not active 3712 * all IO results in error 3713 * When written, doesn't tear down array, but just stops it 3714 * suspended (not supported yet) 3715 * All IO requests will block. The array can be reconfigured. 3716 * Writing this, if accepted, will block until array is quiescent 3717 * readonly 3718 * no resync can happen. no superblocks get written. 3719 * write requests fail 3720 * read-auto 3721 * like readonly, but behaves like 'clean' on a write request. 3722 * 3723 * clean - no pending writes, but otherwise active. 3724 * When written to inactive array, starts without resync 3725 * If a write request arrives then 3726 * if metadata is known, mark 'dirty' and switch to 'active'. 3727 * if not known, block and switch to write-pending 3728 * If written to an active array that has pending writes, then fails. 3729 * active 3730 * fully active: IO and resync can be happening. 3731 * When written to inactive array, starts with resync 3732 * 3733 * write-pending 3734 * clean, but writes are blocked waiting for 'active' to be written. 3735 * 3736 * active-idle 3737 * like active, but no writes have been seen for a while (100msec). 3738 * 3739 */ 3740 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 3741 write_pending, active_idle, bad_word}; 3742 static char *array_states[] = { 3743 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 3744 "write-pending", "active-idle", NULL }; 3745 3746 static int match_word(const char *word, char **list) 3747 { 3748 int n; 3749 for (n=0; list[n]; n++) 3750 if (cmd_match(word, list[n])) 3751 break; 3752 return n; 3753 } 3754 3755 static ssize_t 3756 array_state_show(struct mddev *mddev, char *page) 3757 { 3758 enum array_state st = inactive; 3759 3760 if (mddev->pers) 3761 switch(mddev->ro) { 3762 case 1: 3763 st = readonly; 3764 break; 3765 case 2: 3766 st = read_auto; 3767 break; 3768 case 0: 3769 if (mddev->in_sync) 3770 st = clean; 3771 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 3772 st = write_pending; 3773 else if (mddev->safemode) 3774 st = active_idle; 3775 else 3776 st = active; 3777 } 3778 else { 3779 if (list_empty(&mddev->disks) && 3780 mddev->raid_disks == 0 && 3781 mddev->dev_sectors == 0) 3782 st = clear; 3783 else 3784 st = inactive; 3785 } 3786 return sprintf(page, "%s\n", array_states[st]); 3787 } 3788 3789 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 3790 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 3791 static int do_md_run(struct mddev *mddev); 3792 static int restart_array(struct mddev *mddev); 3793 3794 static ssize_t 3795 array_state_store(struct mddev *mddev, const char *buf, size_t len) 3796 { 3797 int err; 3798 enum array_state st = match_word(buf, array_states); 3799 3800 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 3801 /* don't take reconfig_mutex when toggling between 3802 * clean and active 3803 */ 3804 spin_lock(&mddev->lock); 3805 if (st == active) { 3806 restart_array(mddev); 3807 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3808 wake_up(&mddev->sb_wait); 3809 err = 0; 3810 } else /* st == clean 
*/ { 3811 restart_array(mddev); 3812 if (atomic_read(&mddev->writes_pending) == 0) { 3813 if (mddev->in_sync == 0) { 3814 mddev->in_sync = 1; 3815 if (mddev->safemode == 1) 3816 mddev->safemode = 0; 3817 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3818 } 3819 err = 0; 3820 } else 3821 err = -EBUSY; 3822 } 3823 spin_unlock(&mddev->lock); 3824 return err ?: len; 3825 } 3826 err = mddev_lock(mddev); 3827 if (err) 3828 return err; 3829 err = -EINVAL; 3830 switch(st) { 3831 case bad_word: 3832 break; 3833 case clear: 3834 /* stopping an active array */ 3835 err = do_md_stop(mddev, 0, NULL); 3836 break; 3837 case inactive: 3838 /* stopping an active array */ 3839 if (mddev->pers) 3840 err = do_md_stop(mddev, 2, NULL); 3841 else 3842 err = 0; /* already inactive */ 3843 break; 3844 case suspended: 3845 break; /* not supported yet */ 3846 case readonly: 3847 if (mddev->pers) 3848 err = md_set_readonly(mddev, NULL); 3849 else { 3850 mddev->ro = 1; 3851 set_disk_ro(mddev->gendisk, 1); 3852 err = do_md_run(mddev); 3853 } 3854 break; 3855 case read_auto: 3856 if (mddev->pers) { 3857 if (mddev->ro == 0) 3858 err = md_set_readonly(mddev, NULL); 3859 else if (mddev->ro == 1) 3860 err = restart_array(mddev); 3861 if (err == 0) { 3862 mddev->ro = 2; 3863 set_disk_ro(mddev->gendisk, 0); 3864 } 3865 } else { 3866 mddev->ro = 2; 3867 err = do_md_run(mddev); 3868 } 3869 break; 3870 case clean: 3871 if (mddev->pers) { 3872 restart_array(mddev); 3873 spin_lock(&mddev->lock); 3874 if (atomic_read(&mddev->writes_pending) == 0) { 3875 if (mddev->in_sync == 0) { 3876 mddev->in_sync = 1; 3877 if (mddev->safemode == 1) 3878 mddev->safemode = 0; 3879 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3880 } 3881 err = 0; 3882 } else 3883 err = -EBUSY; 3884 spin_unlock(&mddev->lock); 3885 } else 3886 err = -EINVAL; 3887 break; 3888 case active: 3889 if (mddev->pers) { 3890 restart_array(mddev); 3891 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3892 wake_up(&mddev->sb_wait); 3893 err = 0; 3894 } else { 3895 mddev->ro = 0; 3896 set_disk_ro(mddev->gendisk, 0); 3897 err = do_md_run(mddev); 3898 } 3899 break; 3900 case write_pending: 3901 case active_idle: 3902 /* these cannot be set */ 3903 break; 3904 } 3905 3906 if (!err) { 3907 if (mddev->hold_active == UNTIL_IOCTL) 3908 mddev->hold_active = 0; 3909 sysfs_notify_dirent_safe(mddev->sysfs_state); 3910 } 3911 mddev_unlock(mddev); 3912 return err ?: len; 3913 } 3914 static struct md_sysfs_entry md_array_state = 3915 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3916 3917 static ssize_t 3918 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3919 return sprintf(page, "%d\n", 3920 atomic_read(&mddev->max_corr_read_errors)); 3921 } 3922 3923 static ssize_t 3924 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 3925 { 3926 unsigned int n; 3927 int rv; 3928 3929 rv = kstrtouint(buf, 10, &n); 3930 if (rv < 0) 3931 return rv; 3932 atomic_set(&mddev->max_corr_read_errors, n); 3933 return len; 3934 } 3935 3936 static struct md_sysfs_entry max_corr_read_errors = 3937 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 3938 max_corrected_read_errors_store); 3939 3940 static ssize_t 3941 null_show(struct mddev *mddev, char *page) 3942 { 3943 return -EINVAL; 3944 } 3945 3946 static ssize_t 3947 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 3948 { 3949 /* buf must be %d:%d\n? giving major and minor numbers */ 3950 /* The new device is added to the array. 
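 * (editor's illustration: "echo 8:32 > new_dev" requests the block
 * device with major 8, minor 32 -- conventionally /dev/sdc)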
3951 * If the array has a persistent superblock, we read the 3952 * superblock to initialise info and check validity. 3953 * Otherwise, only checking done is that in bind_rdev_to_array, 3954 * which mainly checks size. 3955 */ 3956 char *e; 3957 int major = simple_strtoul(buf, &e, 10); 3958 int minor; 3959 dev_t dev; 3960 struct md_rdev *rdev; 3961 int err; 3962 3963 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3964 return -EINVAL; 3965 minor = simple_strtoul(e+1, &e, 10); 3966 if (*e && *e != '\n') 3967 return -EINVAL; 3968 dev = MKDEV(major, minor); 3969 if (major != MAJOR(dev) || 3970 minor != MINOR(dev)) 3971 return -EOVERFLOW; 3972 3973 flush_workqueue(md_misc_wq); 3974 3975 err = mddev_lock(mddev); 3976 if (err) 3977 return err; 3978 if (mddev->persistent) { 3979 rdev = md_import_device(dev, mddev->major_version, 3980 mddev->minor_version); 3981 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3982 struct md_rdev *rdev0 3983 = list_entry(mddev->disks.next, 3984 struct md_rdev, same_set); 3985 err = super_types[mddev->major_version] 3986 .load_super(rdev, rdev0, mddev->minor_version); 3987 if (err < 0) 3988 goto out; 3989 } 3990 } else if (mddev->external) 3991 rdev = md_import_device(dev, -2, -1); 3992 else 3993 rdev = md_import_device(dev, -1, -1); 3994 3995 if (IS_ERR(rdev)) { 3996 mddev_unlock(mddev); 3997 return PTR_ERR(rdev); 3998 } 3999 err = bind_rdev_to_array(rdev, mddev); 4000 out: 4001 if (err) 4002 export_rdev(rdev); 4003 mddev_unlock(mddev); 4004 return err ? err : len; 4005 } 4006 4007 static struct md_sysfs_entry md_new_device = 4008 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4009 4010 static ssize_t 4011 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4012 { 4013 char *end; 4014 unsigned long chunk, end_chunk; 4015 int err; 4016 4017 err = mddev_lock(mddev); 4018 if (err) 4019 return err; 4020 if (!mddev->bitmap) 4021 goto out; 4022 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4023 while (*buf) { 4024 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4025 if (buf == end) break; 4026 if (*end == '-') { /* range */ 4027 buf = end + 1; 4028 end_chunk = simple_strtoul(buf, &end, 0); 4029 if (buf == end) break; 4030 } 4031 if (*end && !isspace(*end)) break; 4032 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4033 buf = skip_spaces(end); 4034 } 4035 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4036 out: 4037 mddev_unlock(mddev); 4038 return len; 4039 } 4040 4041 static struct md_sysfs_entry md_bitmap = 4042 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4043 4044 static ssize_t 4045 size_show(struct mddev *mddev, char *page) 4046 { 4047 return sprintf(page, "%llu\n", 4048 (unsigned long long)mddev->dev_sectors / 2); 4049 } 4050 4051 static int update_size(struct mddev *mddev, sector_t num_sectors); 4052 4053 static ssize_t 4054 size_store(struct mddev *mddev, const char *buf, size_t len) 4055 { 4056 /* If array is inactive, we can reduce the component size, but 4057 * not increase it (except from 0). 
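* (Hedged example: the unit is 1K blocks, matching size_show above
* which prints dev_sectors / 2, so
* "echo 1048576 > /sys/block/md0/md/component_size" requests 1 GiB
* per member; md0 is an assumed device name.)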
4058 * If array is active, we can try an on-line resize 4059 */ 4060 sector_t sectors; 4061 int err = strict_blocks_to_sectors(buf, &sectors); 4062 4063 if (err < 0) 4064 return err; 4065 err = mddev_lock(mddev); 4066 if (err) 4067 return err; 4068 if (mddev->pers) { 4069 if (mddev_is_clustered(mddev)) 4070 md_cluster_ops->metadata_update_start(mddev); 4071 err = update_size(mddev, sectors); 4072 md_update_sb(mddev, 1); 4073 if (mddev_is_clustered(mddev)) 4074 md_cluster_ops->metadata_update_finish(mddev); 4075 } else { 4076 if (mddev->dev_sectors == 0 || 4077 mddev->dev_sectors > sectors) 4078 mddev->dev_sectors = sectors; 4079 else 4080 err = -ENOSPC; 4081 } 4082 mddev_unlock(mddev); 4083 return err ? err : len; 4084 } 4085 4086 static struct md_sysfs_entry md_size = 4087 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 4088 4089 /* Metadata version. 4090 * This is one of 4091 * 'none' for arrays with no metadata (good luck...) 4092 * 'external' for arrays with externally managed metadata, 4093 * or N.M for internally known formats 4094 */ 4095 static ssize_t 4096 metadata_show(struct mddev *mddev, char *page) 4097 { 4098 if (mddev->persistent) 4099 return sprintf(page, "%d.%d\n", 4100 mddev->major_version, mddev->minor_version); 4101 else if (mddev->external) 4102 return sprintf(page, "external:%s\n", mddev->metadata_type); 4103 else 4104 return sprintf(page, "none\n"); 4105 } 4106 4107 static ssize_t 4108 metadata_store(struct mddev *mddev, const char *buf, size_t len) 4109 { 4110 int major, minor; 4111 char *e; 4112 int err; 4113 /* Changing the details of 'external' metadata is 4114 * always permitted. Otherwise there must be 4115 * no devices attached to the array. 4116 */ 4117 4118 err = mddev_lock(mddev); 4119 if (err) 4120 return err; 4121 err = -EBUSY; 4122 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4123 ; 4124 else if (!list_empty(&mddev->disks)) 4125 goto out_unlock; 4126 4127 err = 0; 4128 if (cmd_match(buf, "none")) { 4129 mddev->persistent = 0; 4130 mddev->external = 0; 4131 mddev->major_version = 0; 4132 mddev->minor_version = 90; 4133 goto out_unlock; 4134 } 4135 if (strncmp(buf, "external:", 9) == 0) { 4136 size_t namelen = len-9; 4137 if (namelen >= sizeof(mddev->metadata_type)) 4138 namelen = sizeof(mddev->metadata_type)-1; 4139 strncpy(mddev->metadata_type, buf+9, namelen); 4140 mddev->metadata_type[namelen] = 0; 4141 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4142 mddev->metadata_type[--namelen] = 0; 4143 mddev->persistent = 0; 4144 mddev->external = 1; 4145 mddev->major_version = 0; 4146 mddev->minor_version = 90; 4147 goto out_unlock; 4148 } 4149 major = simple_strtoul(buf, &e, 10); 4150 err = -EINVAL; 4151 if (e==buf || *e != '.') 4152 goto out_unlock; 4153 buf = e+1; 4154 minor = simple_strtoul(buf, &e, 10); 4155 if (e==buf || (*e && *e != '\n') ) 4156 goto out_unlock; 4157 err = -ENOENT; 4158 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4159 goto out_unlock; 4160 mddev->major_version = major; 4161 mddev->minor_version = minor; 4162 mddev->persistent = 1; 4163 mddev->external = 0; 4164 err = 0; 4165 out_unlock: 4166 mddev_unlock(mddev); 4167 return err ?: len; 4168 } 4169 4170 static struct md_sysfs_entry md_metadata = 4171 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4172 4173 static ssize_t 4174 action_show(struct mddev *mddev, char *page) 4175 { 4176 char *type = "idle"; 4177 unsigned long recovery = mddev->recovery; 4178 if (test_bit(MD_RECOVERY_FROZEN,
&recovery)) 4179 type = "frozen"; 4180 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4181 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4182 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4183 type = "reshape"; 4184 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4185 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4186 type = "resync"; 4187 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4188 type = "check"; 4189 else 4190 type = "repair"; 4191 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4192 type = "recover"; 4193 else if (mddev->reshape_position != MaxSector) 4194 type = "reshape"; 4195 } 4196 return sprintf(page, "%s\n", type); 4197 } 4198 4199 static ssize_t 4200 action_store(struct mddev *mddev, const char *page, size_t len) 4201 { 4202 if (!mddev->pers || !mddev->pers->sync_request) 4203 return -EINVAL; 4204 4205 4206 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4207 if (cmd_match(page, "frozen")) 4208 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4209 else 4210 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4211 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4212 mddev_lock(mddev) == 0) { 4213 flush_workqueue(md_misc_wq); 4214 if (mddev->sync_thread) { 4215 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4216 md_reap_sync_thread(mddev); 4217 } 4218 mddev_unlock(mddev); 4219 } 4220 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4221 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4222 return -EBUSY; 4223 else if (cmd_match(page, "resync")) 4224 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4225 else if (cmd_match(page, "recover")) { 4226 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4227 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4228 } else if (cmd_match(page, "reshape")) { 4229 int err; 4230 if (mddev->pers->start_reshape == NULL) 4231 return -EINVAL; 4232 err = mddev_lock(mddev); 4233 if (!err) { 4234 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4235 err = mddev->pers->start_reshape(mddev); 4236 mddev_unlock(mddev); 4237 } 4238 if (err) 4239 return err; 4240 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4241 } else { 4242 if (cmd_match(page, "check")) 4243 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4244 else if (!cmd_match(page, "repair")) 4245 return -EINVAL; 4246 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4247 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4248 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4249 } 4250 if (mddev->ro == 2) { 4251 /* A write to sync_action is enough to justify 4252 * canceling read-auto mode 4253 */ 4254 mddev->ro = 0; 4255 md_wakeup_thread(mddev->sync_thread); 4256 } 4257 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4258 md_wakeup_thread(mddev->thread); 4259 sysfs_notify_dirent_safe(mddev->sysfs_action); 4260 return len; 4261 } 4262 4263 static struct md_sysfs_entry md_scan_mode = 4264 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4265 4266 static ssize_t 4267 last_sync_action_show(struct mddev *mddev, char *page) 4268 { 4269 return sprintf(page, "%s\n", mddev->last_sync_action); 4270 } 4271 4272 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4273 4274 static ssize_t 4275 mismatch_cnt_show(struct mddev *mddev, char *page) 4276 { 4277 return sprintf(page, "%llu\n", 4278 (unsigned long long) 4279 atomic64_read(&mddev->resync_mismatches)); 4280 } 4281 4282 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4283 4284 static ssize_t 4285 sync_min_show(struct mddev *mddev, char 
*page) 4286 { 4287 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4288 mddev->sync_speed_min ? "local": "system"); 4289 } 4290 4291 static ssize_t 4292 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4293 { 4294 unsigned int min; 4295 int rv; 4296 4297 if (strncmp(buf, "system", 6)==0) { 4298 min = 0; 4299 } else { 4300 rv = kstrtouint(buf, 10, &min); 4301 if (rv < 0) 4302 return rv; 4303 if (min == 0) 4304 return -EINVAL; 4305 } 4306 mddev->sync_speed_min = min; 4307 return len; 4308 } 4309 4310 static struct md_sysfs_entry md_sync_min = 4311 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4312 4313 static ssize_t 4314 sync_max_show(struct mddev *mddev, char *page) 4315 { 4316 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4317 mddev->sync_speed_max ? "local": "system"); 4318 } 4319 4320 static ssize_t 4321 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4322 { 4323 unsigned int max; 4324 int rv; 4325 4326 if (strncmp(buf, "system", 6)==0) { 4327 max = 0; 4328 } else { 4329 rv = kstrtouint(buf, 10, &max); 4330 if (rv < 0) 4331 return rv; 4332 if (max == 0) 4333 return -EINVAL; 4334 } 4335 mddev->sync_speed_max = max; 4336 return len; 4337 } 4338 4339 static struct md_sysfs_entry md_sync_max = 4340 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4341 4342 static ssize_t 4343 degraded_show(struct mddev *mddev, char *page) 4344 { 4345 return sprintf(page, "%d\n", mddev->degraded); 4346 } 4347 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4348 4349 static ssize_t 4350 sync_force_parallel_show(struct mddev *mddev, char *page) 4351 { 4352 return sprintf(page, "%d\n", mddev->parallel_resync); 4353 } 4354 4355 static ssize_t 4356 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4357 { 4358 long n; 4359 4360 if (kstrtol(buf, 10, &n)) 4361 return -EINVAL; 4362 4363 if (n != 0 && n != 1) 4364 return -EINVAL; 4365 4366 mddev->parallel_resync = n; 4367 4368 if (mddev->sync_thread) 4369 wake_up(&resync_wait); 4370 4371 return len; 4372 } 4373 4374 /* force parallel resync, even with shared block devices */ 4375 static struct md_sysfs_entry md_sync_force_parallel = 4376 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4377 sync_force_parallel_show, sync_force_parallel_store); 4378 4379 static ssize_t 4380 sync_speed_show(struct mddev *mddev, char *page) 4381 { 4382 unsigned long resync, dt, db; 4383 if (mddev->curr_resync == 0) 4384 return sprintf(page, "none\n"); 4385 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4386 dt = (jiffies - mddev->resync_mark) / HZ; 4387 if (!dt) dt++; 4388 db = resync - mddev->resync_mark_cnt; 4389 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4390 } 4391 4392 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4393 4394 static ssize_t 4395 sync_completed_show(struct mddev *mddev, char *page) 4396 { 4397 unsigned long long max_sectors, resync; 4398 4399 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4400 return sprintf(page, "none\n"); 4401 4402 if (mddev->curr_resync == 1 || 4403 mddev->curr_resync == 2) 4404 return sprintf(page, "delayed\n"); 4405 4406 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4407 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4408 max_sectors = mddev->resync_max_sectors; 4409 else 4410 max_sectors = mddev->dev_sectors; 4411 4412 resync = mddev->curr_resync_completed; 4413 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4414 } 4415 4416 static 
struct md_sysfs_entry md_sync_completed = 4417 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); 4418 4419 static ssize_t 4420 min_sync_show(struct mddev *mddev, char *page) 4421 { 4422 return sprintf(page, "%llu\n", 4423 (unsigned long long)mddev->resync_min); 4424 } 4425 static ssize_t 4426 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4427 { 4428 unsigned long long min; 4429 int err; 4430 4431 if (kstrtoull(buf, 10, &min)) 4432 return -EINVAL; 4433 4434 spin_lock(&mddev->lock); 4435 err = -EINVAL; 4436 if (min > mddev->resync_max) 4437 goto out_unlock; 4438 4439 err = -EBUSY; 4440 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4441 goto out_unlock; 4442 4443 /* Round down to multiple of 4K for safety */ 4444 mddev->resync_min = round_down(min, 8); 4445 err = 0; 4446 4447 out_unlock: 4448 spin_unlock(&mddev->lock); 4449 return err ?: len; 4450 } 4451 4452 static struct md_sysfs_entry md_min_sync = 4453 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4454 4455 static ssize_t 4456 max_sync_show(struct mddev *mddev, char *page) 4457 { 4458 if (mddev->resync_max == MaxSector) 4459 return sprintf(page, "max\n"); 4460 else 4461 return sprintf(page, "%llu\n", 4462 (unsigned long long)mddev->resync_max); 4463 } 4464 static ssize_t 4465 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4466 { 4467 int err; 4468 spin_lock(&mddev->lock); 4469 if (strncmp(buf, "max", 3) == 0) 4470 mddev->resync_max = MaxSector; 4471 else { 4472 unsigned long long max; 4473 int chunk; 4474 4475 err = -EINVAL; 4476 if (kstrtoull(buf, 10, &max)) 4477 goto out_unlock; 4478 if (max < mddev->resync_min) 4479 goto out_unlock; 4480 4481 err = -EBUSY; 4482 if (max < mddev->resync_max && 4483 mddev->ro == 0 && 4484 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4485 goto out_unlock; 4486 4487 /* Must be a multiple of chunk_size */ 4488 chunk = mddev->chunk_sectors; 4489 if (chunk) { 4490 sector_t temp = max; 4491 4492 err = -EINVAL; 4493 if (sector_div(temp, chunk)) 4494 goto out_unlock; 4495 } 4496 mddev->resync_max = max; 4497 } 4498 wake_up(&mddev->recovery_wait); 4499 err = 0; 4500 out_unlock: 4501 spin_unlock(&mddev->lock); 4502 return err ?: len; 4503 } 4504 4505 static struct md_sysfs_entry md_max_sync = 4506 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4507 4508 static ssize_t 4509 suspend_lo_show(struct mddev *mddev, char *page) 4510 { 4511 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4512 } 4513 4514 static ssize_t 4515 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4516 { 4517 unsigned long long old, new; 4518 int err; 4519 4520 err = kstrtoull(buf, 10, &new); 4521 if (err < 0) 4522 return err; 4523 if (new != (sector_t)new) 4524 return -EINVAL; 4525 4526 err = mddev_lock(mddev); 4527 if (err) 4528 return err; 4529 err = -EINVAL; 4530 if (mddev->pers == NULL || 4531 mddev->pers->quiesce == NULL) 4532 goto unlock; 4533 old = mddev->suspend_lo; 4534 mddev->suspend_lo = new; 4535 if (new >= old) 4536 /* Shrinking suspended region */ 4537 mddev->pers->quiesce(mddev, 2); 4538 else { 4539 /* Expanding suspended region - need to wait */ 4540 mddev->pers->quiesce(mddev, 1); 4541 mddev->pers->quiesce(mddev, 0); 4542 } 4543 err = 0; 4544 unlock: 4545 mddev_unlock(mddev); 4546 return err ?: len; 4547 } 4548 static struct md_sysfs_entry md_suspend_lo = 4549 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4550 4551 static ssize_t 4552 suspend_hi_show(struct 
mddev *mddev, char *page) 4553 { 4554 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4555 } 4556 4557 static ssize_t 4558 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 4559 { 4560 unsigned long long old, new; 4561 int err; 4562 4563 err = kstrtoull(buf, 10, &new); 4564 if (err < 0) 4565 return err; 4566 if (new != (sector_t)new) 4567 return -EINVAL; 4568 4569 err = mddev_lock(mddev); 4570 if (err) 4571 return err; 4572 err = -EINVAL; 4573 if (mddev->pers == NULL || 4574 mddev->pers->quiesce == NULL) 4575 goto unlock; 4576 old = mddev->suspend_hi; 4577 mddev->suspend_hi = new; 4578 if (new <= old) 4579 /* Shrinking suspended region */ 4580 mddev->pers->quiesce(mddev, 2); 4581 else { 4582 /* Expanding suspended region - need to wait */ 4583 mddev->pers->quiesce(mddev, 1); 4584 mddev->pers->quiesce(mddev, 0); 4585 } 4586 err = 0; 4587 unlock: 4588 mddev_unlock(mddev); 4589 return err ?: len; 4590 } 4591 static struct md_sysfs_entry md_suspend_hi = 4592 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4593 4594 static ssize_t 4595 reshape_position_show(struct mddev *mddev, char *page) 4596 { 4597 if (mddev->reshape_position != MaxSector) 4598 return sprintf(page, "%llu\n", 4599 (unsigned long long)mddev->reshape_position); 4600 strcpy(page, "none\n"); 4601 return 5; 4602 } 4603 4604 static ssize_t 4605 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 4606 { 4607 struct md_rdev *rdev; 4608 unsigned long long new; 4609 int err; 4610 4611 err = kstrtoull(buf, 10, &new); 4612 if (err < 0) 4613 return err; 4614 if (new != (sector_t)new) 4615 return -EINVAL; 4616 err = mddev_lock(mddev); 4617 if (err) 4618 return err; 4619 err = -EBUSY; 4620 if (mddev->pers) 4621 goto unlock; 4622 mddev->reshape_position = new; 4623 mddev->delta_disks = 0; 4624 mddev->reshape_backwards = 0; 4625 mddev->new_level = mddev->level; 4626 mddev->new_layout = mddev->layout; 4627 mddev->new_chunk_sectors = mddev->chunk_sectors; 4628 rdev_for_each(rdev, mddev) 4629 rdev->new_data_offset = rdev->data_offset; 4630 err = 0; 4631 unlock: 4632 mddev_unlock(mddev); 4633 return err ?: len; 4634 } 4635 4636 static struct md_sysfs_entry md_reshape_position = 4637 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4638 reshape_position_store); 4639 4640 static ssize_t 4641 reshape_direction_show(struct mddev *mddev, char *page) 4642 { 4643 return sprintf(page, "%s\n", 4644 mddev->reshape_backwards ? 
"backwards" : "forwards"); 4645 } 4646 4647 static ssize_t 4648 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 4649 { 4650 int backwards = 0; 4651 int err; 4652 4653 if (cmd_match(buf, "forwards")) 4654 backwards = 0; 4655 else if (cmd_match(buf, "backwards")) 4656 backwards = 1; 4657 else 4658 return -EINVAL; 4659 if (mddev->reshape_backwards == backwards) 4660 return len; 4661 4662 err = mddev_lock(mddev); 4663 if (err) 4664 return err; 4665 /* check if we are allowed to change */ 4666 if (mddev->delta_disks) 4667 err = -EBUSY; 4668 else if (mddev->persistent && 4669 mddev->major_version == 0) 4670 err = -EINVAL; 4671 else 4672 mddev->reshape_backwards = backwards; 4673 mddev_unlock(mddev); 4674 return err ?: len; 4675 } 4676 4677 static struct md_sysfs_entry md_reshape_direction = 4678 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 4679 reshape_direction_store); 4680 4681 static ssize_t 4682 array_size_show(struct mddev *mddev, char *page) 4683 { 4684 if (mddev->external_size) 4685 return sprintf(page, "%llu\n", 4686 (unsigned long long)mddev->array_sectors/2); 4687 else 4688 return sprintf(page, "default\n"); 4689 } 4690 4691 static ssize_t 4692 array_size_store(struct mddev *mddev, const char *buf, size_t len) 4693 { 4694 sector_t sectors; 4695 int err; 4696 4697 err = mddev_lock(mddev); 4698 if (err) 4699 return err; 4700 4701 if (strncmp(buf, "default", 7) == 0) { 4702 if (mddev->pers) 4703 sectors = mddev->pers->size(mddev, 0, 0); 4704 else 4705 sectors = mddev->array_sectors; 4706 4707 mddev->external_size = 0; 4708 } else { 4709 if (strict_blocks_to_sectors(buf, §ors) < 0) 4710 err = -EINVAL; 4711 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4712 err = -E2BIG; 4713 else 4714 mddev->external_size = 1; 4715 } 4716 4717 if (!err) { 4718 mddev->array_sectors = sectors; 4719 if (mddev->pers) { 4720 set_capacity(mddev->gendisk, mddev->array_sectors); 4721 revalidate_disk(mddev->gendisk); 4722 } 4723 } 4724 mddev_unlock(mddev); 4725 return err ?: len; 4726 } 4727 4728 static struct md_sysfs_entry md_array_size = 4729 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4730 array_size_store); 4731 4732 static struct attribute *md_default_attrs[] = { 4733 &md_level.attr, 4734 &md_layout.attr, 4735 &md_raid_disks.attr, 4736 &md_chunk_size.attr, 4737 &md_size.attr, 4738 &md_resync_start.attr, 4739 &md_metadata.attr, 4740 &md_new_device.attr, 4741 &md_safe_delay.attr, 4742 &md_array_state.attr, 4743 &md_reshape_position.attr, 4744 &md_reshape_direction.attr, 4745 &md_array_size.attr, 4746 &max_corr_read_errors.attr, 4747 NULL, 4748 }; 4749 4750 static struct attribute *md_redundancy_attrs[] = { 4751 &md_scan_mode.attr, 4752 &md_last_scan_mode.attr, 4753 &md_mismatches.attr, 4754 &md_sync_min.attr, 4755 &md_sync_max.attr, 4756 &md_sync_speed.attr, 4757 &md_sync_force_parallel.attr, 4758 &md_sync_completed.attr, 4759 &md_min_sync.attr, 4760 &md_max_sync.attr, 4761 &md_suspend_lo.attr, 4762 &md_suspend_hi.attr, 4763 &md_bitmap.attr, 4764 &md_degraded.attr, 4765 NULL, 4766 }; 4767 static struct attribute_group md_redundancy_group = { 4768 .name = NULL, 4769 .attrs = md_redundancy_attrs, 4770 }; 4771 4772 static ssize_t 4773 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4774 { 4775 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4776 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4777 ssize_t rv; 4778 4779 if (!entry->show) 4780 return -EIO; 4781 
spin_lock(&all_mddevs_lock); 4782 if (list_empty(&mddev->all_mddevs)) { 4783 spin_unlock(&all_mddevs_lock); 4784 return -EBUSY; 4785 } 4786 mddev_get(mddev); 4787 spin_unlock(&all_mddevs_lock); 4788 4789 rv = entry->show(mddev, page); 4790 mddev_put(mddev); 4791 return rv; 4792 } 4793 4794 static ssize_t 4795 md_attr_store(struct kobject *kobj, struct attribute *attr, 4796 const char *page, size_t length) 4797 { 4798 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4799 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4800 ssize_t rv; 4801 4802 if (!entry->store) 4803 return -EIO; 4804 if (!capable(CAP_SYS_ADMIN)) 4805 return -EACCES; 4806 spin_lock(&all_mddevs_lock); 4807 if (list_empty(&mddev->all_mddevs)) { 4808 spin_unlock(&all_mddevs_lock); 4809 return -EBUSY; 4810 } 4811 mddev_get(mddev); 4812 spin_unlock(&all_mddevs_lock); 4813 rv = entry->store(mddev, page, length); 4814 mddev_put(mddev); 4815 return rv; 4816 } 4817 4818 static void md_free(struct kobject *ko) 4819 { 4820 struct mddev *mddev = container_of(ko, struct mddev, kobj); 4821 4822 if (mddev->sysfs_state) 4823 sysfs_put(mddev->sysfs_state); 4824 4825 if (mddev->queue) 4826 blk_cleanup_queue(mddev->queue); 4827 if (mddev->gendisk) { 4828 del_gendisk(mddev->gendisk); 4829 put_disk(mddev->gendisk); 4830 } 4831 4832 kfree(mddev); 4833 } 4834 4835 static const struct sysfs_ops md_sysfs_ops = { 4836 .show = md_attr_show, 4837 .store = md_attr_store, 4838 }; 4839 static struct kobj_type md_ktype = { 4840 .release = md_free, 4841 .sysfs_ops = &md_sysfs_ops, 4842 .default_attrs = md_default_attrs, 4843 }; 4844 4845 int mdp_major = 0; 4846 4847 static void mddev_delayed_delete(struct work_struct *ws) 4848 { 4849 struct mddev *mddev = container_of(ws, struct mddev, del_work); 4850 4851 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4852 kobject_del(&mddev->kobj); 4853 kobject_put(&mddev->kobj); 4854 } 4855 4856 static int md_alloc(dev_t dev, char *name) 4857 { 4858 static DEFINE_MUTEX(disks_mutex); 4859 struct mddev *mddev = mddev_find(dev); 4860 struct gendisk *disk; 4861 int partitioned; 4862 int shift; 4863 int unit; 4864 int error; 4865 4866 if (!mddev) 4867 return -ENODEV; 4868 4869 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4870 shift = partitioned ? MdpMinorShift : 0; 4871 unit = MINOR(mddev->unit) >> shift; 4872 4873 /* wait for any previous instance of this device to be 4874 * completely removed (mddev_delayed_delete). 4875 */ 4876 flush_workqueue(md_misc_wq); 4877 4878 mutex_lock(&disks_mutex); 4879 error = -EEXIST; 4880 if (mddev->gendisk) 4881 goto abort; 4882 4883 if (name) { 4884 /* Need to ensure that 'name' is not a duplicate. 
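* Two gendisks with the same name would collide in /dev and in sysfs,
* so scan all_mddevs under the lock before committing to the name.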
4885 */ 4886 struct mddev *mddev2; 4887 spin_lock(&all_mddevs_lock); 4888 4889 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4890 if (mddev2->gendisk && 4891 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4892 spin_unlock(&all_mddevs_lock); 4893 goto abort; 4894 } 4895 spin_unlock(&all_mddevs_lock); 4896 } 4897 4898 error = -ENOMEM; 4899 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4900 if (!mddev->queue) 4901 goto abort; 4902 mddev->queue->queuedata = mddev; 4903 4904 blk_queue_make_request(mddev->queue, md_make_request); 4905 blk_set_stacking_limits(&mddev->queue->limits); 4906 4907 disk = alloc_disk(1 << shift); 4908 if (!disk) { 4909 blk_cleanup_queue(mddev->queue); 4910 mddev->queue = NULL; 4911 goto abort; 4912 } 4913 disk->major = MAJOR(mddev->unit); 4914 disk->first_minor = unit << shift; 4915 if (name) 4916 strcpy(disk->disk_name, name); 4917 else if (partitioned) 4918 sprintf(disk->disk_name, "md_d%d", unit); 4919 else 4920 sprintf(disk->disk_name, "md%d", unit); 4921 disk->fops = &md_fops; 4922 disk->private_data = mddev; 4923 disk->queue = mddev->queue; 4924 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); 4925 /* Allow extended partitions. This makes the 4926 * 'mdp' device redundant, but we can't really 4927 * remove it now. 4928 */ 4929 disk->flags |= GENHD_FL_EXT_DEVT; 4930 mddev->gendisk = disk; 4931 /* As soon as we call add_disk(), another thread could get 4932 * through to md_open, so make sure it doesn't get too far 4933 */ 4934 mutex_lock(&mddev->open_mutex); 4935 add_disk(disk); 4936 4937 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4938 &disk_to_dev(disk)->kobj, "%s", "md"); 4939 if (error) { 4940 /* This isn't possible, but as kobject_init_and_add is marked 4941 * __must_check, we must do something with the result 4942 */ 4943 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4944 disk->disk_name); 4945 error = 0; 4946 } 4947 if (mddev->kobj.sd && 4948 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4949 printk(KERN_DEBUG "pointless warning\n"); 4950 mutex_unlock(&mddev->open_mutex); 4951 abort: 4952 mutex_unlock(&disks_mutex); 4953 if (!error && mddev->kobj.sd) { 4954 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4955 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4956 } 4957 mddev_put(mddev); 4958 return error; 4959 } 4960 4961 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4962 { 4963 md_alloc(dev, NULL); 4964 return NULL; 4965 } 4966 4967 static int add_named_array(const char *val, struct kernel_param *kp) 4968 { 4969 /* val must be "md_*" where * is not all digits. 4970 * We allocate an array with a large free minor number, and 4971 * set the name to val. val must not already be an active name. 
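* (Hedged example, assuming this handler is wired up as a module
* parameter named "new_array":
* echo md_home > /sys/module/md_mod/parameters/new_array
* would create an array known as /dev/md_home.)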
4972 */ 4973 int len = strlen(val); 4974 char buf[DISK_NAME_LEN]; 4975 4976 while (len && val[len-1] == '\n') 4977 len--; 4978 if (len >= DISK_NAME_LEN) 4979 return -E2BIG; 4980 strlcpy(buf, val, len+1); 4981 if (strncmp(buf, "md_", 3) != 0) 4982 return -EINVAL; 4983 return md_alloc(0, buf); 4984 } 4985 4986 static void md_safemode_timeout(unsigned long data) 4987 { 4988 struct mddev *mddev = (struct mddev *) data; 4989 4990 if (!atomic_read(&mddev->writes_pending)) { 4991 mddev->safemode = 1; 4992 if (mddev->external) 4993 sysfs_notify_dirent_safe(mddev->sysfs_state); 4994 } 4995 md_wakeup_thread(mddev->thread); 4996 } 4997 4998 static int start_dirty_degraded; 4999 5000 int md_run(struct mddev *mddev) 5001 { 5002 int err; 5003 struct md_rdev *rdev; 5004 struct md_personality *pers; 5005 5006 if (list_empty(&mddev->disks)) 5007 /* cannot run an array with no devices.. */ 5008 return -EINVAL; 5009 5010 if (mddev->pers) 5011 return -EBUSY; 5012 /* Cannot run until previous stop completes properly */ 5013 if (mddev->sysfs_active) 5014 return -EBUSY; 5015 5016 /* 5017 * Analyze all RAID superblock(s) 5018 */ 5019 if (!mddev->raid_disks) { 5020 if (!mddev->persistent) 5021 return -EINVAL; 5022 analyze_sbs(mddev); 5023 } 5024 5025 if (mddev->level != LEVEL_NONE) 5026 request_module("md-level-%d", mddev->level); 5027 else if (mddev->clevel[0]) 5028 request_module("md-%s", mddev->clevel); 5029 5030 /* 5031 * Drop all container device buffers, from now on 5032 * the only valid external interface is through the md 5033 * device. 5034 */ 5035 rdev_for_each(rdev, mddev) { 5036 if (test_bit(Faulty, &rdev->flags)) 5037 continue; 5038 sync_blockdev(rdev->bdev); 5039 invalidate_bdev(rdev->bdev); 5040 5041 /* perform some consistency tests on the device. 5042 * We don't want the data to overlap the metadata, 5043 * Internal Bitmap issues have been handled elsewhere. 5044 */ 5045 if (rdev->meta_bdev) { 5046 /* Nothing to check */; 5047 } else if (rdev->data_offset < rdev->sb_start) { 5048 if (mddev->dev_sectors && 5049 rdev->data_offset + mddev->dev_sectors 5050 > rdev->sb_start) { 5051 printk("md: %s: data overlaps metadata\n", 5052 mdname(mddev)); 5053 return -EINVAL; 5054 } 5055 } else { 5056 if (rdev->sb_start + rdev->sb_size/512 5057 > rdev->data_offset) { 5058 printk("md: %s: metadata overlaps data\n", 5059 mdname(mddev)); 5060 return -EINVAL; 5061 } 5062 } 5063 sysfs_notify_dirent_safe(rdev->sysfs_state); 5064 } 5065 5066 if (mddev->bio_set == NULL) 5067 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0); 5068 5069 spin_lock(&pers_lock); 5070 pers = find_pers(mddev->level, mddev->clevel); 5071 if (!pers || !try_module_get(pers->owner)) { 5072 spin_unlock(&pers_lock); 5073 if (mddev->level != LEVEL_NONE) 5074 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 5075 mddev->level); 5076 else 5077 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 5078 mddev->clevel); 5079 return -EINVAL; 5080 } 5081 spin_unlock(&pers_lock); 5082 if (mddev->level != pers->level) { 5083 mddev->level = pers->level; 5084 mddev->new_level = pers->level; 5085 } 5086 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5087 5088 if (mddev->reshape_position != MaxSector && 5089 pers->start_reshape == NULL) { 5090 /* This personality cannot handle reshaping... */ 5091 module_put(pers->owner); 5092 return -EINVAL; 5093 } 5094 5095 if (pers->sync_request) { 5096 /* Warn if this is a potentially silly 5097 * configuration. 
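* e.g. two members that are partitions of one physical disk (the
* bd_contains comparison below), where a single disk failure would
* take out both halves of a mirror.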
5098 */ 5099 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5100 struct md_rdev *rdev2; 5101 int warned = 0; 5102 5103 rdev_for_each(rdev, mddev) 5104 rdev_for_each(rdev2, mddev) { 5105 if (rdev < rdev2 && 5106 rdev->bdev->bd_contains == 5107 rdev2->bdev->bd_contains) { 5108 printk(KERN_WARNING 5109 "%s: WARNING: %s appears to be" 5110 " on the same physical disk as" 5111 " %s.\n", 5112 mdname(mddev), 5113 bdevname(rdev->bdev,b), 5114 bdevname(rdev2->bdev,b2)); 5115 warned = 1; 5116 } 5117 } 5118 5119 if (warned) 5120 printk(KERN_WARNING 5121 "True protection against single-disk" 5122 " failure might be compromised.\n"); 5123 } 5124 5125 mddev->recovery = 0; 5126 /* may be over-ridden by personality */ 5127 mddev->resync_max_sectors = mddev->dev_sectors; 5128 5129 mddev->ok_start_degraded = start_dirty_degraded; 5130 5131 if (start_readonly && mddev->ro == 0) 5132 mddev->ro = 2; /* read-only, but switch on first write */ 5133 5134 err = pers->run(mddev); 5135 if (err) 5136 printk(KERN_ERR "md: pers->run() failed ...\n"); 5137 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5138 WARN_ONCE(!mddev->external_size, "%s: default size too small," 5139 " but 'external_size' not in effect?\n", __func__); 5140 printk(KERN_ERR 5141 "md: invalid array_size %llu > default size %llu\n", 5142 (unsigned long long)mddev->array_sectors / 2, 5143 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5144 err = -EINVAL; 5145 } 5146 if (err == 0 && pers->sync_request && 5147 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5148 struct bitmap *bitmap; 5149 5150 bitmap = bitmap_create(mddev, -1); 5151 if (IS_ERR(bitmap)) { 5152 err = PTR_ERR(bitmap); 5153 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 5154 mdname(mddev), err); 5155 } else 5156 mddev->bitmap = bitmap; 5157 5158 } 5159 if (err) { 5160 mddev_detach(mddev); 5161 if (mddev->private) 5162 pers->free(mddev, mddev->private); 5163 mddev->private = NULL; 5164 module_put(pers->owner); 5165 bitmap_destroy(mddev); 5166 return err; 5167 } 5168 if (mddev->queue) { 5169 mddev->queue->backing_dev_info.congested_data = mddev; 5170 mddev->queue->backing_dev_info.congested_fn = md_congested; 5171 } 5172 if (pers->sync_request) { 5173 if (mddev->kobj.sd && 5174 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 5175 printk(KERN_WARNING 5176 "md: cannot register extra attributes for %s\n", 5177 mdname(mddev)); 5178 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 5179 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 5180 mddev->ro = 0; 5181 5182 atomic_set(&mddev->writes_pending,0); 5183 atomic_set(&mddev->max_corr_read_errors, 5184 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 5185 mddev->safemode = 0; 5186 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 5187 mddev->in_sync = 1; 5188 smp_wmb(); 5189 spin_lock(&mddev->lock); 5190 mddev->pers = pers; 5191 mddev->ready = 1; 5192 spin_unlock(&mddev->lock); 5193 rdev_for_each(rdev, mddev) 5194 if (rdev->raid_disk >= 0) 5195 if (sysfs_link_rdev(mddev, rdev)) 5196 /* failure here is OK */; 5197 5198 if (mddev->degraded && !mddev->ro) 5199 /* This ensures that recovering status is reported immediately 5200 * via sysfs - until a lack of spares is confirmed. 
5201 */ 5202 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5203 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5204 5205 if (mddev->flags & MD_UPDATE_SB_FLAGS) 5206 md_update_sb(mddev, 0); 5207 5208 md_new_event(mddev); 5209 sysfs_notify_dirent_safe(mddev->sysfs_state); 5210 sysfs_notify_dirent_safe(mddev->sysfs_action); 5211 sysfs_notify(&mddev->kobj, NULL, "degraded"); 5212 return 0; 5213 } 5214 EXPORT_SYMBOL_GPL(md_run); 5215 5216 static int do_md_run(struct mddev *mddev) 5217 { 5218 int err; 5219 5220 err = md_run(mddev); 5221 if (err) 5222 goto out; 5223 err = bitmap_load(mddev); 5224 if (err) { 5225 bitmap_destroy(mddev); 5226 goto out; 5227 } 5228 5229 md_wakeup_thread(mddev->thread); 5230 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 5231 5232 set_capacity(mddev->gendisk, mddev->array_sectors); 5233 revalidate_disk(mddev->gendisk); 5234 mddev->changed = 1; 5235 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5236 out: 5237 return err; 5238 } 5239 5240 static int restart_array(struct mddev *mddev) 5241 { 5242 struct gendisk *disk = mddev->gendisk; 5243 5244 /* Complain if it has no devices */ 5245 if (list_empty(&mddev->disks)) 5246 return -ENXIO; 5247 if (!mddev->pers) 5248 return -EINVAL; 5249 if (!mddev->ro) 5250 return -EBUSY; 5251 mddev->safemode = 0; 5252 mddev->ro = 0; 5253 set_disk_ro(disk, 0); 5254 printk(KERN_INFO "md: %s switched to read-write mode.\n", 5255 mdname(mddev)); 5256 /* Kick recovery or resync if necessary */ 5257 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5258 md_wakeup_thread(mddev->thread); 5259 md_wakeup_thread(mddev->sync_thread); 5260 sysfs_notify_dirent_safe(mddev->sysfs_state); 5261 return 0; 5262 } 5263 5264 static void md_clean(struct mddev *mddev) 5265 { 5266 mddev->array_sectors = 0; 5267 mddev->external_size = 0; 5268 mddev->dev_sectors = 0; 5269 mddev->raid_disks = 0; 5270 mddev->recovery_cp = 0; 5271 mddev->resync_min = 0; 5272 mddev->resync_max = MaxSector; 5273 mddev->reshape_position = MaxSector; 5274 mddev->external = 0; 5275 mddev->persistent = 0; 5276 mddev->level = LEVEL_NONE; 5277 mddev->clevel[0] = 0; 5278 mddev->flags = 0; 5279 mddev->ro = 0; 5280 mddev->metadata_type[0] = 0; 5281 mddev->chunk_sectors = 0; 5282 mddev->ctime = mddev->utime = 0; 5283 mddev->layout = 0; 5284 mddev->max_disks = 0; 5285 mddev->events = 0; 5286 mddev->can_decrease_events = 0; 5287 mddev->delta_disks = 0; 5288 mddev->reshape_backwards = 0; 5289 mddev->new_level = LEVEL_NONE; 5290 mddev->new_layout = 0; 5291 mddev->new_chunk_sectors = 0; 5292 mddev->curr_resync = 0; 5293 atomic64_set(&mddev->resync_mismatches, 0); 5294 mddev->suspend_lo = mddev->suspend_hi = 0; 5295 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5296 mddev->recovery = 0; 5297 mddev->in_sync = 0; 5298 mddev->changed = 0; 5299 mddev->degraded = 0; 5300 mddev->safemode = 0; 5301 mddev->private = NULL; 5302 mddev->bitmap_info.offset = 0; 5303 mddev->bitmap_info.default_offset = 0; 5304 mddev->bitmap_info.default_space = 0; 5305 mddev->bitmap_info.chunksize = 0; 5306 mddev->bitmap_info.daemon_sleep = 0; 5307 mddev->bitmap_info.max_write_behind = 0; 5308 } 5309 5310 static void __md_stop_writes(struct mddev *mddev) 5311 { 5312 if (mddev_is_clustered(mddev)) 5313 md_cluster_ops->metadata_update_start(mddev); 5314 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5315 flush_workqueue(md_misc_wq); 5316 if (mddev->sync_thread) { 5317 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5318 md_reap_sync_thread(mddev); 5319 } 5320 5321 
del_timer_sync(&mddev->safemode_timer); 5322 5323 bitmap_flush(mddev); 5324 md_super_wait(mddev); 5325 5326 if (mddev->ro == 0 && 5327 (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) { 5328 /* mark array as shutdown cleanly */ 5329 mddev->in_sync = 1; 5330 md_update_sb(mddev, 1); 5331 } 5332 if (mddev_is_clustered(mddev)) 5333 md_cluster_ops->metadata_update_finish(mddev); 5334 } 5335 5336 void md_stop_writes(struct mddev *mddev) 5337 { 5338 mddev_lock_nointr(mddev); 5339 __md_stop_writes(mddev); 5340 mddev_unlock(mddev); 5341 } 5342 EXPORT_SYMBOL_GPL(md_stop_writes); 5343 5344 static void mddev_detach(struct mddev *mddev) 5345 { 5346 struct bitmap *bitmap = mddev->bitmap; 5347 /* wait for behind writes to complete */ 5348 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { 5349 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n", 5350 mdname(mddev)); 5351 /* need to kick something here to make sure I/O goes? */ 5352 wait_event(bitmap->behind_wait, 5353 atomic_read(&bitmap->behind_writes) == 0); 5354 } 5355 if (mddev->pers && mddev->pers->quiesce) { 5356 mddev->pers->quiesce(mddev, 1); 5357 mddev->pers->quiesce(mddev, 0); 5358 } 5359 md_unregister_thread(&mddev->thread); 5360 if (mddev->queue) 5361 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5362 } 5363 5364 static void __md_stop(struct mddev *mddev) 5365 { 5366 struct md_personality *pers = mddev->pers; 5367 mddev_detach(mddev); 5368 /* Ensure ->event_work is done */ 5369 flush_workqueue(md_misc_wq); 5370 spin_lock(&mddev->lock); 5371 mddev->ready = 0; 5372 mddev->pers = NULL; 5373 spin_unlock(&mddev->lock); 5374 pers->free(mddev, mddev->private); 5375 mddev->private = NULL; 5376 if (pers->sync_request && mddev->to_remove == NULL) 5377 mddev->to_remove = &md_redundancy_group; 5378 module_put(pers->owner); 5379 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5380 } 5381 5382 void md_stop(struct mddev *mddev) 5383 { 5384 /* stop the array and free any attached data structures.
5385 * This is called from dm-raid 5386 */ 5387 __md_stop(mddev); 5388 bitmap_destroy(mddev); 5389 if (mddev->bio_set) 5390 bioset_free(mddev->bio_set); 5391 } 5392 5393 EXPORT_SYMBOL_GPL(md_stop); 5394 5395 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 5396 { 5397 int err = 0; 5398 int did_freeze = 0; 5399 5400 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5401 did_freeze = 1; 5402 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5403 md_wakeup_thread(mddev->thread); 5404 } 5405 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5406 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5407 if (mddev->sync_thread) 5408 /* Thread might be blocked waiting for metadata update 5409 * which will now never happen */ 5410 wake_up_process(mddev->sync_thread->tsk); 5411 5412 mddev_unlock(mddev); 5413 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 5414 &mddev->recovery)); 5415 mddev_lock_nointr(mddev); 5416 5417 mutex_lock(&mddev->open_mutex); 5418 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5419 mddev->sync_thread || 5420 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 5421 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5422 printk("md: %s still in use.\n",mdname(mddev)); 5423 if (did_freeze) { 5424 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5425 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5426 md_wakeup_thread(mddev->thread); 5427 } 5428 err = -EBUSY; 5429 goto out; 5430 } 5431 if (mddev->pers) { 5432 __md_stop_writes(mddev); 5433 5434 err = -ENXIO; 5435 if (mddev->ro==1) 5436 goto out; 5437 mddev->ro = 1; 5438 set_disk_ro(mddev->gendisk, 1); 5439 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5440 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5441 md_wakeup_thread(mddev->thread); 5442 sysfs_notify_dirent_safe(mddev->sysfs_state); 5443 err = 0; 5444 } 5445 out: 5446 mutex_unlock(&mddev->open_mutex); 5447 return err; 5448 } 5449 5450 /* mode: 5451 * 0 - completely stop and dis-assemble array 5452 * 2 - stop but do not disassemble array 5453 */ 5454 static int do_md_stop(struct mddev *mddev, int mode, 5455 struct block_device *bdev) 5456 { 5457 struct gendisk *disk = mddev->gendisk; 5458 struct md_rdev *rdev; 5459 int did_freeze = 0; 5460 5461 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5462 did_freeze = 1; 5463 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5464 md_wakeup_thread(mddev->thread); 5465 } 5466 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5467 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5468 if (mddev->sync_thread) 5469 /* Thread might be blocked waiting for metadata update 5470 * which will now never happen */ 5471 wake_up_process(mddev->sync_thread->tsk); 5472 5473 mddev_unlock(mddev); 5474 wait_event(resync_wait, (mddev->sync_thread == NULL && 5475 !test_bit(MD_RECOVERY_RUNNING, 5476 &mddev->recovery))); 5477 mddev_lock_nointr(mddev); 5478 5479 mutex_lock(&mddev->open_mutex); 5480 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5481 mddev->sysfs_active || 5482 mddev->sync_thread || 5483 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 5484 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5485 printk("md: %s still in use.\n",mdname(mddev)); 5486 mutex_unlock(&mddev->open_mutex); 5487 if (did_freeze) { 5488 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5489 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5490 md_wakeup_thread(mddev->thread); 5491 } 5492 return -EBUSY; 5493 } 5494 if (mddev->pers) { 5495 if (mddev->ro) 5496 set_disk_ro(disk, 0); 5497 5498 
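/* Quiesce writers and detach the personality first; userspace is
* then told about the 'inactive' state via sysfs before the rdev
* links and the gendisk capacity are torn down.
*/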
__md_stop_writes(mddev); 5499 __md_stop(mddev); 5500 mddev->queue->backing_dev_info.congested_fn = NULL; 5501 5502 /* tell userspace to handle 'inactive' */ 5503 sysfs_notify_dirent_safe(mddev->sysfs_state); 5504 5505 rdev_for_each(rdev, mddev) 5506 if (rdev->raid_disk >= 0) 5507 sysfs_unlink_rdev(mddev, rdev); 5508 5509 set_capacity(disk, 0); 5510 mutex_unlock(&mddev->open_mutex); 5511 mddev->changed = 1; 5512 revalidate_disk(disk); 5513 5514 if (mddev->ro) 5515 mddev->ro = 0; 5516 } else 5517 mutex_unlock(&mddev->open_mutex); 5518 /* 5519 * Free resources if final stop 5520 */ 5521 if (mode == 0) { 5522 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 5523 5524 bitmap_destroy(mddev); 5525 if (mddev->bitmap_info.file) { 5526 struct file *f = mddev->bitmap_info.file; 5527 spin_lock(&mddev->lock); 5528 mddev->bitmap_info.file = NULL; 5529 spin_unlock(&mddev->lock); 5530 fput(f); 5531 } 5532 mddev->bitmap_info.offset = 0; 5533 5534 export_array(mddev); 5535 5536 md_clean(mddev); 5537 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5538 if (mddev->hold_active == UNTIL_STOP) 5539 mddev->hold_active = 0; 5540 } 5541 blk_integrity_unregister(disk); 5542 md_new_event(mddev); 5543 sysfs_notify_dirent_safe(mddev->sysfs_state); 5544 return 0; 5545 } 5546 5547 #ifndef MODULE 5548 static void autorun_array(struct mddev *mddev) 5549 { 5550 struct md_rdev *rdev; 5551 int err; 5552 5553 if (list_empty(&mddev->disks)) 5554 return; 5555 5556 printk(KERN_INFO "md: running: "); 5557 5558 rdev_for_each(rdev, mddev) { 5559 char b[BDEVNAME_SIZE]; 5560 printk("<%s>", bdevname(rdev->bdev,b)); 5561 } 5562 printk("\n"); 5563 5564 err = do_md_run(mddev); 5565 if (err) { 5566 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 5567 do_md_stop(mddev, 0, NULL); 5568 } 5569 } 5570 5571 /* 5572 * let's try to run arrays based on all disks that have arrived 5573 * until now. (those are in pending_raid_disks) 5574 * 5575 * the method: pick the first pending disk, collect all disks with 5576 * the same UUID, remove all from the pending list and put them into 5577 * the 'same_array' list. Then order this list based on superblock 5578 * update time (freshest comes first), kick out 'old' disks and 5579 * compare superblocks. If everything's fine then run it. 5580 * 5581 * If "unit" is allocated, then bump its reference count 5582 */ 5583 static void autorun_devices(int part) 5584 { 5585 struct md_rdev *rdev0, *rdev, *tmp; 5586 struct mddev *mddev; 5587 char b[BDEVNAME_SIZE]; 5588 5589 printk(KERN_INFO "md: autorun ...\n"); 5590 while (!list_empty(&pending_raid_disks)) { 5591 int unit; 5592 dev_t dev; 5593 LIST_HEAD(candidates); 5594 rdev0 = list_entry(pending_raid_disks.next, 5595 struct md_rdev, same_set); 5596 5597 printk(KERN_INFO "md: considering %s ...\n", 5598 bdevname(rdev0->bdev,b)); 5599 INIT_LIST_HEAD(&candidates); 5600 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 5601 if (super_90_load(rdev, rdev0, 0) >= 0) { 5602 printk(KERN_INFO "md: adding %s ...\n", 5603 bdevname(rdev->bdev,b)); 5604 list_move(&rdev->same_set, &candidates); 5605 } 5606 /* 5607 * now we have a set of devices, with all of them having 5608 * mostly sane superblocks. It's time to allocate the 5609 * mddev.
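* (Worked example, assuming MdpMinorShift is 6 as in md_u.h: a
* partitionable array with preferred_minor 2 gets minor 2 << 6 = 128,
* leaving 64 minors for its partitions.)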
5610 */ 5611 if (part) { 5612 dev = MKDEV(mdp_major, 5613 rdev0->preferred_minor << MdpMinorShift); 5614 unit = MINOR(dev) >> MdpMinorShift; 5615 } else { 5616 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 5617 unit = MINOR(dev); 5618 } 5619 if (rdev0->preferred_minor != unit) { 5620 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 5621 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 5622 break; 5623 } 5624 5625 md_probe(dev, NULL, NULL); 5626 mddev = mddev_find(dev); 5627 if (!mddev || !mddev->gendisk) { 5628 if (mddev) 5629 mddev_put(mddev); 5630 printk(KERN_ERR 5631 "md: cannot allocate memory for md drive.\n"); 5632 break; 5633 } 5634 if (mddev_lock(mddev)) 5635 printk(KERN_WARNING "md: %s locked, cannot run\n", 5636 mdname(mddev)); 5637 else if (mddev->raid_disks || mddev->major_version 5638 || !list_empty(&mddev->disks)) { 5639 printk(KERN_WARNING 5640 "md: %s already running, cannot run %s\n", 5641 mdname(mddev), bdevname(rdev0->bdev,b)); 5642 mddev_unlock(mddev); 5643 } else { 5644 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 5645 mddev->persistent = 1; 5646 rdev_for_each_list(rdev, tmp, &candidates) { 5647 list_del_init(&rdev->same_set); 5648 if (bind_rdev_to_array(rdev, mddev)) 5649 export_rdev(rdev); 5650 } 5651 autorun_array(mddev); 5652 mddev_unlock(mddev); 5653 } 5654 /* on success, candidates will be empty, on error 5655 * it won't... 5656 */ 5657 rdev_for_each_list(rdev, tmp, &candidates) { 5658 list_del_init(&rdev->same_set); 5659 export_rdev(rdev); 5660 } 5661 mddev_put(mddev); 5662 } 5663 printk(KERN_INFO "md: ... autorun DONE.\n"); 5664 } 5665 #endif /* !MODULE */ 5666 5667 static int get_version(void __user *arg) 5668 { 5669 mdu_version_t ver; 5670 5671 ver.major = MD_MAJOR_VERSION; 5672 ver.minor = MD_MINOR_VERSION; 5673 ver.patchlevel = MD_PATCHLEVEL_VERSION; 5674 5675 if (copy_to_user(arg, &ver, sizeof(ver))) 5676 return -EFAULT; 5677 5678 return 0; 5679 } 5680 5681 static int get_array_info(struct mddev *mddev, void __user *arg) 5682 { 5683 mdu_array_info_t info; 5684 int nr,working,insync,failed,spare; 5685 struct md_rdev *rdev; 5686 5687 nr = working = insync = failed = spare = 0; 5688 rcu_read_lock(); 5689 rdev_for_each_rcu(rdev, mddev) { 5690 nr++; 5691 if (test_bit(Faulty, &rdev->flags)) 5692 failed++; 5693 else { 5694 working++; 5695 if (test_bit(In_sync, &rdev->flags)) 5696 insync++; 5697 else 5698 spare++; 5699 } 5700 } 5701 rcu_read_unlock(); 5702 5703 info.major_version = mddev->major_version; 5704 info.minor_version = mddev->minor_version; 5705 info.patch_version = MD_PATCHLEVEL_VERSION; 5706 info.ctime = mddev->ctime; 5707 info.level = mddev->level; 5708 info.size = mddev->dev_sectors / 2; 5709 if (info.size != mddev->dev_sectors / 2) /* overflow */ 5710 info.size = -1; 5711 info.nr_disks = nr; 5712 info.raid_disks = mddev->raid_disks; 5713 info.md_minor = mddev->md_minor; 5714 info.not_persistent= !mddev->persistent; 5715 5716 info.utime = mddev->utime; 5717 info.state = 0; 5718 if (mddev->in_sync) 5719 info.state = (1<<MD_SB_CLEAN); 5720 if (mddev->bitmap && mddev->bitmap_info.offset) 5721 info.state |= (1<<MD_SB_BITMAP_PRESENT); 5722 if (mddev_is_clustered(mddev)) 5723 info.state |= (1<<MD_SB_CLUSTERED); 5724 info.active_disks = insync; 5725 info.working_disks = working; 5726 info.failed_disks = failed; 5727 info.spare_disks = spare; 5728 5729 info.layout = mddev->layout; 5730 info.chunk_size = mddev->chunk_sectors << 9; 5731 5732 if (copy_to_user(arg, &info, sizeof(info))) 5733 return -EFAULT; 5734 5735 return 0; 5736 } 5737 5738 
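/* A minimal userspace sketch of driving the GET_ARRAY_INFO ioctl
* above (illustrative only, not part of this driver; assumes an
* existing /dev/md0):
*
*	#include <stdio.h>
*	#include <fcntl.h>
*	#include <sys/ioctl.h>
*	#include <linux/major.h>
*	#include <linux/raid/md_u.h>
*
*	int main(void)
*	{
*		mdu_array_info_t info;
*		int fd = open("/dev/md0", O_RDONLY);
*
*		if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0)
*			return 1;
*		printf("level %d, %d raid disks\n",
*		       info.level, info.raid_disks);
*		return 0;
*	}
*/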
static int get_bitmap_file(struct mddev *mddev, void __user * arg) 5739 { 5740 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5741 char *ptr; 5742 int err; 5743 5744 file = kzalloc(sizeof(*file), GFP_NOIO); 5745 if (!file) 5746 return -ENOMEM; 5747 5748 err = 0; 5749 spin_lock(&mddev->lock); 5750 /* bitmap enabled */ 5751 if (mddev->bitmap_info.file) { 5752 ptr = file_path(mddev->bitmap_info.file, file->pathname, 5753 sizeof(file->pathname)); 5754 if (IS_ERR(ptr)) 5755 err = PTR_ERR(ptr); 5756 else 5757 memmove(file->pathname, ptr, 5758 sizeof(file->pathname)-(ptr-file->pathname)); 5759 } 5760 spin_unlock(&mddev->lock); 5761 5762 if (err == 0 && 5763 copy_to_user(arg, file, sizeof(*file))) 5764 err = -EFAULT; 5765 5766 kfree(file); 5767 return err; 5768 } 5769 5770 static int get_disk_info(struct mddev *mddev, void __user * arg) 5771 { 5772 mdu_disk_info_t info; 5773 struct md_rdev *rdev; 5774 5775 if (copy_from_user(&info, arg, sizeof(info))) 5776 return -EFAULT; 5777 5778 rcu_read_lock(); 5779 rdev = md_find_rdev_nr_rcu(mddev, info.number); 5780 if (rdev) { 5781 info.major = MAJOR(rdev->bdev->bd_dev); 5782 info.minor = MINOR(rdev->bdev->bd_dev); 5783 info.raid_disk = rdev->raid_disk; 5784 info.state = 0; 5785 if (test_bit(Faulty, &rdev->flags)) 5786 info.state |= (1<<MD_DISK_FAULTY); 5787 else if (test_bit(In_sync, &rdev->flags)) { 5788 info.state |= (1<<MD_DISK_ACTIVE); 5789 info.state |= (1<<MD_DISK_SYNC); 5790 } 5791 if (test_bit(WriteMostly, &rdev->flags)) 5792 info.state |= (1<<MD_DISK_WRITEMOSTLY); 5793 } else { 5794 info.major = info.minor = 0; 5795 info.raid_disk = -1; 5796 info.state = (1<<MD_DISK_REMOVED); 5797 } 5798 rcu_read_unlock(); 5799 5800 if (copy_to_user(arg, &info, sizeof(info))) 5801 return -EFAULT; 5802 5803 return 0; 5804 } 5805 5806 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) 5807 { 5808 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5809 struct md_rdev *rdev; 5810 dev_t dev = MKDEV(info->major,info->minor); 5811 5812 if (mddev_is_clustered(mddev) && 5813 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 5814 pr_err("%s: Cannot add to clustered mddev.\n", 5815 mdname(mddev)); 5816 return -EINVAL; 5817 } 5818 5819 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5820 return -EOVERFLOW; 5821 5822 if (!mddev->raid_disks) { 5823 int err; 5824 /* expecting a device which has a superblock */ 5825 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5826 if (IS_ERR(rdev)) { 5827 printk(KERN_WARNING 5828 "md: md_import_device returned %ld\n", 5829 PTR_ERR(rdev)); 5830 return PTR_ERR(rdev); 5831 } 5832 if (!list_empty(&mddev->disks)) { 5833 struct md_rdev *rdev0 5834 = list_entry(mddev->disks.next, 5835 struct md_rdev, same_set); 5836 err = super_types[mddev->major_version] 5837 .load_super(rdev, rdev0, mddev->minor_version); 5838 if (err < 0) { 5839 printk(KERN_WARNING 5840 "md: %s has different UUID to %s\n", 5841 bdevname(rdev->bdev,b), 5842 bdevname(rdev0->bdev,b2)); 5843 export_rdev(rdev); 5844 return -EINVAL; 5845 } 5846 } 5847 err = bind_rdev_to_array(rdev, mddev); 5848 if (err) 5849 export_rdev(rdev); 5850 return err; 5851 } 5852 5853 /* 5854 * add_new_disk can be used once the array is assembled 5855 * to add "hot spares". 
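* (A typical route here: "mdadm /dev/md0 --add /dev/sdc1" issues the
* ADD_NEW_DISK ioctl; the device names are illustrative.)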
They must already have a superblock 5856 * written 5857 */ 5858 if (mddev->pers) { 5859 int err; 5860 if (!mddev->pers->hot_add_disk) { 5861 printk(KERN_WARNING 5862 "%s: personality does not support diskops!\n", 5863 mdname(mddev)); 5864 return -EINVAL; 5865 } 5866 if (mddev->persistent) 5867 rdev = md_import_device(dev, mddev->major_version, 5868 mddev->minor_version); 5869 else 5870 rdev = md_import_device(dev, -1, -1); 5871 if (IS_ERR(rdev)) { 5872 printk(KERN_WARNING 5873 "md: md_import_device returned %ld\n", 5874 PTR_ERR(rdev)); 5875 return PTR_ERR(rdev); 5876 } 5877 /* set saved_raid_disk if appropriate */ 5878 if (!mddev->persistent) { 5879 if (info->state & (1<<MD_DISK_SYNC) && 5880 info->raid_disk < mddev->raid_disks) { 5881 rdev->raid_disk = info->raid_disk; 5882 set_bit(In_sync, &rdev->flags); 5883 clear_bit(Bitmap_sync, &rdev->flags); 5884 } else 5885 rdev->raid_disk = -1; 5886 rdev->saved_raid_disk = rdev->raid_disk; 5887 } else 5888 super_types[mddev->major_version]. 5889 validate_super(mddev, rdev); 5890 if ((info->state & (1<<MD_DISK_SYNC)) && 5891 rdev->raid_disk != info->raid_disk) { 5892 /* This was a hot-add request, but the events count doesn't 5893 * match, so reject it. 5894 */ 5895 export_rdev(rdev); 5896 return -EINVAL; 5897 } 5898 5899 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5900 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5901 set_bit(WriteMostly, &rdev->flags); 5902 else 5903 clear_bit(WriteMostly, &rdev->flags); 5904 5905 /* 5906 * check whether the device shows up in other nodes 5907 */ 5908 if (mddev_is_clustered(mddev)) { 5909 if (info->state & (1 << MD_DISK_CANDIDATE)) { 5910 /* Through --cluster-confirm */ 5911 set_bit(Candidate, &rdev->flags); 5912 err = md_cluster_ops->new_disk_ack(mddev, true); 5913 if (err) { 5914 export_rdev(rdev); 5915 return err; 5916 } 5917 } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 5918 /* --add initiated by this node */ 5919 err = md_cluster_ops->add_new_disk_start(mddev, rdev); 5920 if (err) { 5921 md_cluster_ops->add_new_disk_finish(mddev); 5922 export_rdev(rdev); 5923 return err; 5924 } 5925 } 5926 } 5927 5928 rdev->raid_disk = -1; 5929 err = bind_rdev_to_array(rdev, mddev); 5930 if (err) 5931 export_rdev(rdev); 5932 else 5933 err = add_bound_rdev(rdev); 5934 if (mddev_is_clustered(mddev) && 5935 (info->state & (1 << MD_DISK_CLUSTER_ADD))) 5936 md_cluster_ops->add_new_disk_finish(mddev); 5937 return err; 5938 } 5939 5940 /* otherwise, add_new_disk is only allowed 5941 * for major_version==0 superblocks 5942 */ 5943 if (mddev->major_version != 0) { 5944 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5945 mdname(mddev)); 5946 return -EINVAL; 5947 } 5948 5949 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5950 int err; 5951 rdev = md_import_device(dev, -1, 0); 5952 if (IS_ERR(rdev)) { 5953 printk(KERN_WARNING 5954 "md: error, md_import_device() returned %ld\n", 5955 PTR_ERR(rdev)); 5956 return PTR_ERR(rdev); 5957 } 5958 rdev->desc_nr = info->number; 5959 if (info->raid_disk < mddev->raid_disks) 5960 rdev->raid_disk = info->raid_disk; 5961 else 5962 rdev->raid_disk = -1; 5963 5964 if (rdev->raid_disk < mddev->raid_disks) 5965 if (info->state & (1<<MD_DISK_SYNC)) 5966 set_bit(In_sync, &rdev->flags); 5967 5968 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5969 set_bit(WriteMostly, &rdev->flags); 5970 5971 if (!mddev->persistent) { 5972 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5973 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5974 } else 5975 rdev->sb_start =
calc_dev_sboffset(rdev); 5976 rdev->sectors = rdev->sb_start; 5977 5978 err = bind_rdev_to_array(rdev, mddev); 5979 if (err) { 5980 export_rdev(rdev); 5981 return err; 5982 } 5983 } 5984 5985 return 0; 5986 } 5987 5988 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 5989 { 5990 char b[BDEVNAME_SIZE]; 5991 struct md_rdev *rdev; 5992 5993 rdev = find_rdev(mddev, dev); 5994 if (!rdev) 5995 return -ENXIO; 5996 5997 if (mddev_is_clustered(mddev)) 5998 md_cluster_ops->metadata_update_start(mddev); 5999 6000 clear_bit(Blocked, &rdev->flags); 6001 remove_and_add_spares(mddev, rdev); 6002 6003 if (rdev->raid_disk >= 0) 6004 goto busy; 6005 6006 if (mddev_is_clustered(mddev)) 6007 md_cluster_ops->remove_disk(mddev, rdev); 6008 6009 md_kick_rdev_from_array(rdev); 6010 md_update_sb(mddev, 1); 6011 md_new_event(mddev); 6012 6013 if (mddev_is_clustered(mddev)) 6014 md_cluster_ops->metadata_update_finish(mddev); 6015 6016 return 0; 6017 busy: 6018 if (mddev_is_clustered(mddev)) 6019 md_cluster_ops->metadata_update_cancel(mddev); 6020 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 6021 bdevname(rdev->bdev,b), mdname(mddev)); 6022 return -EBUSY; 6023 } 6024 6025 static int hot_add_disk(struct mddev *mddev, dev_t dev) 6026 { 6027 char b[BDEVNAME_SIZE]; 6028 int err; 6029 struct md_rdev *rdev; 6030 6031 if (!mddev->pers) 6032 return -ENODEV; 6033 6034 if (mddev->major_version != 0) { 6035 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 6036 " version-0 superblocks.\n", 6037 mdname(mddev)); 6038 return -EINVAL; 6039 } 6040 if (!mddev->pers->hot_add_disk) { 6041 printk(KERN_WARNING 6042 "%s: personality does not support diskops!\n", 6043 mdname(mddev)); 6044 return -EINVAL; 6045 } 6046 6047 rdev = md_import_device(dev, -1, 0); 6048 if (IS_ERR(rdev)) { 6049 printk(KERN_WARNING 6050 "md: error, md_import_device() returned %ld\n", 6051 PTR_ERR(rdev)); 6052 return -EINVAL; 6053 } 6054 6055 if (mddev->persistent) 6056 rdev->sb_start = calc_dev_sboffset(rdev); 6057 else 6058 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6059 6060 rdev->sectors = rdev->sb_start; 6061 6062 if (test_bit(Faulty, &rdev->flags)) { 6063 printk(KERN_WARNING 6064 "md: cannot hot-add faulty %s disk to %s!\n", 6065 bdevname(rdev->bdev,b), mdname(mddev)); 6066 err = -EINVAL; 6067 goto abort_export; 6068 } 6069 6070 if (mddev_is_clustered(mddev)) 6071 md_cluster_ops->metadata_update_start(mddev); 6072 clear_bit(In_sync, &rdev->flags); 6073 rdev->desc_nr = -1; 6074 rdev->saved_raid_disk = -1; 6075 err = bind_rdev_to_array(rdev, mddev); 6076 if (err) 6077 goto abort_clustered; 6078 6079 /* 6080 * The rest had better be atomic; we can have disk failures 6081 * noticed in interrupt contexts ... 6082 */ 6083 6084 rdev->raid_disk = -1; 6085 6086 md_update_sb(mddev, 1); 6087 6088 if (mddev_is_clustered(mddev)) 6089 md_cluster_ops->metadata_update_finish(mddev); 6090 /* 6091 * Kick recovery, maybe this spare has to be added to the 6092 * array immediately.
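* Setting MD_RECOVERY_NEEDED and waking mddev->thread below lets md_check_recovery() notice the new spare and spawn a resync thread if one is needed.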
6093 */ 6094 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6095 md_wakeup_thread(mddev->thread); 6096 md_new_event(mddev); 6097 return 0; 6098 6099 abort_clustered: 6100 if (mddev_is_clustered(mddev)) 6101 md_cluster_ops->metadata_update_cancel(mddev); 6102 abort_export: 6103 export_rdev(rdev); 6104 return err; 6105 } 6106 6107 static int set_bitmap_file(struct mddev *mddev, int fd) 6108 { 6109 int err = 0; 6110 6111 if (mddev->pers) { 6112 if (!mddev->pers->quiesce || !mddev->thread) 6113 return -EBUSY; 6114 if (mddev->recovery || mddev->sync_thread) 6115 return -EBUSY; 6116 /* we should be able to change the bitmap.. */ 6117 } 6118 6119 if (fd >= 0) { 6120 struct inode *inode; 6121 struct file *f; 6122 6123 if (mddev->bitmap || mddev->bitmap_info.file) 6124 return -EEXIST; /* cannot add when bitmap is present */ 6125 f = fget(fd); 6126 6127 if (f == NULL) { 6128 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 6129 mdname(mddev)); 6130 return -EBADF; 6131 } 6132 6133 inode = f->f_mapping->host; 6134 if (!S_ISREG(inode->i_mode)) { 6135 printk(KERN_ERR "%s: error: bitmap file must be a regular file\n", 6136 mdname(mddev)); 6137 err = -EBADF; 6138 } else if (!(f->f_mode & FMODE_WRITE)) { 6139 printk(KERN_ERR "%s: error: bitmap file must be opened for write\n", 6140 mdname(mddev)); 6141 err = -EBADF; 6142 } else if (atomic_read(&inode->i_writecount) != 1) { 6143 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 6144 mdname(mddev)); 6145 err = -EBUSY; 6146 } 6147 if (err) { 6148 fput(f); 6149 return err; 6150 } 6151 mddev->bitmap_info.file = f; 6152 mddev->bitmap_info.offset = 0; /* file overrides offset */ 6153 } else if (mddev->bitmap == NULL) 6154 return -ENOENT; /* cannot remove what isn't there */ 6155 err = 0; 6156 if (mddev->pers) { 6157 mddev->pers->quiesce(mddev, 1); 6158 if (fd >= 0) { 6159 struct bitmap *bitmap; 6160 6161 bitmap = bitmap_create(mddev, -1); 6162 if (!IS_ERR(bitmap)) { 6163 mddev->bitmap = bitmap; 6164 err = bitmap_load(mddev); 6165 } else 6166 err = PTR_ERR(bitmap); 6167 } 6168 if (fd < 0 || err) { 6169 bitmap_destroy(mddev); 6170 fd = -1; /* make sure to put the file */ 6171 } 6172 mddev->pers->quiesce(mddev, 0); 6173 } 6174 if (fd < 0) { 6175 struct file *f = mddev->bitmap_info.file; 6176 if (f) { 6177 spin_lock(&mddev->lock); 6178 mddev->bitmap_info.file = NULL; 6179 spin_unlock(&mddev->lock); 6180 fput(f); 6181 } 6182 } 6183 6184 return err; 6185 } 6186 6187 /* 6188 * set_array_info is used in two different ways 6189 * The original usage is when creating a new array. 6190 * In this usage, raid_disks is > 0 and, together with 6191 * level, size, not_persistent, layout and chunksize, it determines the 6192 * shape of the array. 6193 * This will always create an array with a type-0.90.0 superblock. 6194 * The newer usage is when assembling an array. 6195 * In this case raid_disks will be 0, and the major_version field is 6196 * used to determine which style super-blocks are to be found on the devices. 6197 * The minor and patch _version numbers are also kept in case the 6198 * super_block handler wishes to interpret them. 6199 */ 6200 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) 6201 { 6202 6203 if (info->raid_disks == 0) { 6204 /* just setting version number for superblock loading */ 6205 if (info->major_version < 0 || 6206 info->major_version >= ARRAY_SIZE(super_types) || 6207 super_types[info->major_version].name == NULL) { 6208 /* maybe try to auto-load a module?
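* (no auto-loading is actually attempted here; an unknown version is simply reported and rejected with -EINVAL below)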
*/ 6209 printk(KERN_INFO 6210 "md: superblock version %d not known\n", 6211 info->major_version); 6212 return -EINVAL; 6213 } 6214 mddev->major_version = info->major_version; 6215 mddev->minor_version = info->minor_version; 6216 mddev->patch_version = info->patch_version; 6217 mddev->persistent = !info->not_persistent; 6218 /* ensure mddev_put doesn't delete this now that there 6219 * is some minimal configuration. 6220 */ 6221 mddev->ctime = get_seconds(); 6222 return 0; 6223 } 6224 mddev->major_version = MD_MAJOR_VERSION; 6225 mddev->minor_version = MD_MINOR_VERSION; 6226 mddev->patch_version = MD_PATCHLEVEL_VERSION; 6227 mddev->ctime = get_seconds(); 6228 6229 mddev->level = info->level; 6230 mddev->clevel[0] = 0; 6231 mddev->dev_sectors = 2 * (sector_t)info->size; 6232 mddev->raid_disks = info->raid_disks; 6233 /* don't set md_minor, it is determined by which /dev/md* was 6234 * opened 6235 */ 6236 if (info->state & (1<<MD_SB_CLEAN)) 6237 mddev->recovery_cp = MaxSector; 6238 else 6239 mddev->recovery_cp = 0; 6240 mddev->persistent = ! info->not_persistent; 6241 mddev->external = 0; 6242 6243 mddev->layout = info->layout; 6244 mddev->chunk_sectors = info->chunk_size >> 9; 6245 6246 mddev->max_disks = MD_SB_DISKS; 6247 6248 if (mddev->persistent) 6249 mddev->flags = 0; 6250 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6251 6252 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 6253 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 6254 mddev->bitmap_info.offset = 0; 6255 6256 mddev->reshape_position = MaxSector; 6257 6258 /* 6259 * Generate a 128 bit UUID 6260 */ 6261 get_random_bytes(mddev->uuid, 16); 6262 6263 mddev->new_level = mddev->level; 6264 mddev->new_chunk_sectors = mddev->chunk_sectors; 6265 mddev->new_layout = mddev->layout; 6266 mddev->delta_disks = 0; 6267 mddev->reshape_backwards = 0; 6268 6269 return 0; 6270 } 6271 6272 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 6273 { 6274 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 6275 6276 if (mddev->external_size) 6277 return; 6278 6279 mddev->array_sectors = array_sectors; 6280 } 6281 EXPORT_SYMBOL(md_set_array_sectors); 6282 6283 static int update_size(struct mddev *mddev, sector_t num_sectors) 6284 { 6285 struct md_rdev *rdev; 6286 int rv; 6287 int fit = (num_sectors == 0); 6288 6289 if (mddev->pers->resize == NULL) 6290 return -EINVAL; 6291 /* The "num_sectors" is the number of sectors of each device that 6292 * is used. This can only make sense for arrays with redundancy. 6293 * linear and raid0 always use whatever space is available. We can only 6294 * consider changing this number if no resync or reconstruction is 6295 * happening, and if the new size is acceptable. It must fit before the 6296 * sb_start or, if that is <data_offset, it must fit before the size 6297 * of each device. If num_sectors is zero, we find the largest size 6298 * that fits.
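* For example, num_sectors == 2097152 asks to use 1GiB of each device, since these are 512-byte sectors.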
6299 */ 6300 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6301 mddev->sync_thread) 6302 return -EBUSY; 6303 if (mddev->ro) 6304 return -EROFS; 6305 6306 rdev_for_each(rdev, mddev) { 6307 sector_t avail = rdev->sectors; 6308 6309 if (fit && (num_sectors == 0 || num_sectors > avail)) 6310 num_sectors = avail; 6311 if (avail < num_sectors) 6312 return -ENOSPC; 6313 } 6314 rv = mddev->pers->resize(mddev, num_sectors); 6315 if (!rv) 6316 revalidate_disk(mddev->gendisk); 6317 return rv; 6318 } 6319 6320 static int update_raid_disks(struct mddev *mddev, int raid_disks) 6321 { 6322 int rv; 6323 struct md_rdev *rdev; 6324 /* change the number of raid disks */ 6325 if (mddev->pers->check_reshape == NULL) 6326 return -EINVAL; 6327 if (mddev->ro) 6328 return -EROFS; 6329 if (raid_disks <= 0 || 6330 (mddev->max_disks && raid_disks >= mddev->max_disks)) 6331 return -EINVAL; 6332 if (mddev->sync_thread || 6333 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6334 mddev->reshape_position != MaxSector) 6335 return -EBUSY; 6336 6337 rdev_for_each(rdev, mddev) { 6338 if (mddev->raid_disks < raid_disks && 6339 rdev->data_offset < rdev->new_data_offset) 6340 return -EINVAL; 6341 if (mddev->raid_disks > raid_disks && 6342 rdev->data_offset > rdev->new_data_offset) 6343 return -EINVAL; 6344 } 6345 6346 mddev->delta_disks = raid_disks - mddev->raid_disks; 6347 if (mddev->delta_disks < 0) 6348 mddev->reshape_backwards = 1; 6349 else if (mddev->delta_disks > 0) 6350 mddev->reshape_backwards = 0; 6351 6352 rv = mddev->pers->check_reshape(mddev); 6353 if (rv < 0) { 6354 mddev->delta_disks = 0; 6355 mddev->reshape_backwards = 0; 6356 } 6357 return rv; 6358 } 6359 6360 /* 6361 * update_array_info is used to change the configuration of an 6362 * on-line array. 6363 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 6364 * fields in the info are checked against the array. 6365 * Any differences that cannot be handled will cause an error. 6366 * Normally, only one change can be managed at a time. 6367 */ 6368 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 6369 { 6370 int rv = 0; 6371 int cnt = 0; 6372 int state = 0; 6373 6374 /* calculate expected state,ignoring low bits */ 6375 if (mddev->bitmap && mddev->bitmap_info.offset) 6376 state |= (1 << MD_SB_BITMAP_PRESENT); 6377 6378 if (mddev->major_version != info->major_version || 6379 mddev->minor_version != info->minor_version || 6380 /* mddev->patch_version != info->patch_version || */ 6381 mddev->ctime != info->ctime || 6382 mddev->level != info->level || 6383 /* mddev->layout != info->layout || */ 6384 mddev->persistent != !info->not_persistent || 6385 mddev->chunk_sectors != info->chunk_size >> 9 || 6386 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 6387 ((state^info->state) & 0xfffffe00) 6388 ) 6389 return -EINVAL; 6390 /* Check there is only one change */ 6391 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6392 cnt++; 6393 if (mddev->raid_disks != info->raid_disks) 6394 cnt++; 6395 if (mddev->layout != info->layout) 6396 cnt++; 6397 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 6398 cnt++; 6399 if (cnt == 0) 6400 return 0; 6401 if (cnt > 1) 6402 return -EINVAL; 6403 6404 if (mddev->layout != info->layout) { 6405 /* Change layout 6406 * we don't need to do anything at the md level, the 6407 * personality will take care of it all. 
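* (new_layout is set tentatively below; if check_reshape() rejects it, it is reverted to the current layout)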
*/ 6409 if (mddev->pers->check_reshape == NULL) 6410 return -EINVAL; 6411 else { 6412 mddev->new_layout = info->layout; 6413 rv = mddev->pers->check_reshape(mddev); 6414 if (rv) 6415 mddev->new_layout = mddev->layout; 6416 return rv; 6417 } 6418 } 6419 if (mddev_is_clustered(mddev)) 6420 md_cluster_ops->metadata_update_start(mddev); 6421 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6422 rv = update_size(mddev, (sector_t)info->size * 2); 6423 6424 if (mddev->raid_disks != info->raid_disks) 6425 rv = update_raid_disks(mddev, info->raid_disks); 6426 6427 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 6428 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 6429 rv = -EINVAL; 6430 goto err; 6431 } 6432 if (mddev->recovery || mddev->sync_thread) { 6433 rv = -EBUSY; 6434 goto err; 6435 } 6436 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 6437 struct bitmap *bitmap; 6438 /* add the bitmap */ 6439 if (mddev->bitmap) { 6440 rv = -EEXIST; 6441 goto err; 6442 } 6443 if (mddev->bitmap_info.default_offset == 0) { 6444 rv = -EINVAL; 6445 goto err; 6446 } 6447 mddev->bitmap_info.offset = 6448 mddev->bitmap_info.default_offset; 6449 mddev->bitmap_info.space = 6450 mddev->bitmap_info.default_space; 6451 mddev->pers->quiesce(mddev, 1); 6452 bitmap = bitmap_create(mddev, -1); 6453 if (!IS_ERR(bitmap)) { 6454 mddev->bitmap = bitmap; 6455 rv = bitmap_load(mddev); 6456 } else 6457 rv = PTR_ERR(bitmap); 6458 if (rv) 6459 bitmap_destroy(mddev); 6460 mddev->pers->quiesce(mddev, 0); 6461 } else { 6462 /* remove the bitmap */ 6463 if (!mddev->bitmap) { 6464 rv = -ENOENT; 6465 goto err; 6466 } 6467 if (mddev->bitmap->storage.file) { 6468 rv = -EINVAL; 6469 goto err; 6470 } 6471 mddev->pers->quiesce(mddev, 1); 6472 bitmap_destroy(mddev); 6473 mddev->pers->quiesce(mddev, 0); 6474 mddev->bitmap_info.offset = 0; 6475 } 6476 } 6477 md_update_sb(mddev, 1); 6478 if (mddev_is_clustered(mddev)) 6479 md_cluster_ops->metadata_update_finish(mddev); 6480 return rv; 6481 err: 6482 if (mddev_is_clustered(mddev)) 6483 md_cluster_ops->metadata_update_cancel(mddev); 6484 return rv; 6485 } 6486 6487 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 6488 { 6489 struct md_rdev *rdev; 6490 int err = 0; 6491 6492 if (mddev->pers == NULL) 6493 return -ENODEV; 6494 6495 rcu_read_lock(); 6496 rdev = find_rdev_rcu(mddev, dev); 6497 if (!rdev) 6498 err = -ENODEV; 6499 else { 6500 md_error(mddev, rdev); 6501 if (!test_bit(Faulty, &rdev->flags)) 6502 err = -EBUSY; 6503 } 6504 rcu_read_unlock(); 6505 return err; 6506 } 6507 6508 /* 6509 * We have a problem here: there is no easy way to give a CHS 6510 * virtual geometry. We currently pretend that we have 2 heads and 6511 * 4 sectors per track (with a BIG number of cylinders...). This drives 6512 * dosfs just mad...
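* (with this geometry, a 1TiB array - 2^31 sectors - reports 2^31/8 = 268435456 cylinders)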
;-) 6513 */ 6514 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 6515 { 6516 struct mddev *mddev = bdev->bd_disk->private_data; 6517 6518 geo->heads = 2; 6519 geo->sectors = 4; 6520 geo->cylinders = mddev->array_sectors / 8; 6521 return 0; 6522 } 6523 6524 static inline bool md_ioctl_valid(unsigned int cmd) 6525 { 6526 switch (cmd) { 6527 case ADD_NEW_DISK: 6528 case BLKROSET: 6529 case GET_ARRAY_INFO: 6530 case GET_BITMAP_FILE: 6531 case GET_DISK_INFO: 6532 case HOT_ADD_DISK: 6533 case HOT_REMOVE_DISK: 6534 case RAID_AUTORUN: 6535 case RAID_VERSION: 6536 case RESTART_ARRAY_RW: 6537 case RUN_ARRAY: 6538 case SET_ARRAY_INFO: 6539 case SET_BITMAP_FILE: 6540 case SET_DISK_FAULTY: 6541 case STOP_ARRAY: 6542 case STOP_ARRAY_RO: 6543 case CLUSTERED_DISK_NACK: 6544 return true; 6545 default: 6546 return false; 6547 } 6548 } 6549 6550 static int md_ioctl(struct block_device *bdev, fmode_t mode, 6551 unsigned int cmd, unsigned long arg) 6552 { 6553 int err = 0; 6554 void __user *argp = (void __user *)arg; 6555 struct mddev *mddev = NULL; 6556 int ro; 6557 6558 if (!md_ioctl_valid(cmd)) 6559 return -ENOTTY; 6560 6561 switch (cmd) { 6562 case RAID_VERSION: 6563 case GET_ARRAY_INFO: 6564 case GET_DISK_INFO: 6565 break; 6566 default: 6567 if (!capable(CAP_SYS_ADMIN)) 6568 return -EACCES; 6569 } 6570 6571 /* 6572 * Commands dealing with the RAID driver but not any 6573 * particular array: 6574 */ 6575 switch (cmd) { 6576 case RAID_VERSION: 6577 err = get_version(argp); 6578 goto out; 6579 6580 #ifndef MODULE 6581 case RAID_AUTORUN: 6582 err = 0; 6583 autostart_arrays(arg); 6584 goto out; 6585 #endif 6586 default:; 6587 } 6588 6589 /* 6590 * Commands creating/starting a new array: 6591 */ 6592 6593 mddev = bdev->bd_disk->private_data; 6594 6595 if (!mddev) { 6596 BUG(); 6597 goto out; 6598 } 6599 6600 /* Some actions do not require the mutex */ 6601 switch (cmd) { 6602 case GET_ARRAY_INFO: 6603 if (!mddev->raid_disks && !mddev->external) 6604 err = -ENODEV; 6605 else 6606 err = get_array_info(mddev, argp); 6607 goto out; 6608 6609 case GET_DISK_INFO: 6610 if (!mddev->raid_disks && !mddev->external) 6611 err = -ENODEV; 6612 else 6613 err = get_disk_info(mddev, argp); 6614 goto out; 6615 6616 case SET_DISK_FAULTY: 6617 err = set_disk_faulty(mddev, new_decode_dev(arg)); 6618 goto out; 6619 6620 case GET_BITMAP_FILE: 6621 err = get_bitmap_file(mddev, argp); 6622 goto out; 6623 6624 } 6625 6626 if (cmd == ADD_NEW_DISK) 6627 /* need to ensure md_delayed_delete() has completed */ 6628 flush_workqueue(md_misc_wq); 6629 6630 if (cmd == HOT_REMOVE_DISK) 6631 /* need to ensure recovery thread has run */ 6632 wait_event_interruptible_timeout(mddev->sb_wait, 6633 !test_bit(MD_RECOVERY_NEEDED, 6634 &mddev->flags), 6635 msecs_to_jiffies(5000)); 6636 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 6637 /* Need to flush page cache, and ensure no-one else opens 6638 * and writes 6639 */ 6640 mutex_lock(&mddev->open_mutex); 6641 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 6642 mutex_unlock(&mddev->open_mutex); 6643 err = -EBUSY; 6644 goto out; 6645 } 6646 set_bit(MD_STILL_CLOSED, &mddev->flags); 6647 mutex_unlock(&mddev->open_mutex); 6648 sync_blockdev(bdev); 6649 } 6650 err = mddev_lock(mddev); 6651 if (err) { 6652 printk(KERN_INFO 6653 "md: ioctl lock interrupted, reason %d, cmd %d\n", 6654 err, cmd); 6655 goto out; 6656 } 6657 6658 if (cmd == SET_ARRAY_INFO) { 6659 mdu_array_info_t info; 6660 if (!arg) 6661 memset(&info, 0, sizeof(info)); 6662 else if (copy_from_user(&info, argp,
sizeof(info))) { 6663 err = -EFAULT; 6664 goto unlock; 6665 } 6666 if (mddev->pers) { 6667 err = update_array_info(mddev, &info); 6668 if (err) { 6669 printk(KERN_WARNING "md: couldn't update" 6670 " array info. %d\n", err); 6671 goto unlock; 6672 } 6673 goto unlock; 6674 } 6675 if (!list_empty(&mddev->disks)) { 6676 printk(KERN_WARNING 6677 "md: array %s already has disks!\n", 6678 mdname(mddev)); 6679 err = -EBUSY; 6680 goto unlock; 6681 } 6682 if (mddev->raid_disks) { 6683 printk(KERN_WARNING 6684 "md: array %s already initialised!\n", 6685 mdname(mddev)); 6686 err = -EBUSY; 6687 goto unlock; 6688 } 6689 err = set_array_info(mddev, &info); 6690 if (err) { 6691 printk(KERN_WARNING "md: couldn't set" 6692 " array info. %d\n", err); 6693 goto unlock; 6694 } 6695 goto unlock; 6696 } 6697 6698 /* 6699 * Commands querying/configuring an existing array: 6700 */ 6701 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 6702 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 6703 if ((!mddev->raid_disks && !mddev->external) 6704 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 6705 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 6706 && cmd != GET_BITMAP_FILE) { 6707 err = -ENODEV; 6708 goto unlock; 6709 } 6710 6711 /* 6712 * Commands even a read-only array can execute: 6713 */ 6714 switch (cmd) { 6715 case RESTART_ARRAY_RW: 6716 err = restart_array(mddev); 6717 goto unlock; 6718 6719 case STOP_ARRAY: 6720 err = do_md_stop(mddev, 0, bdev); 6721 goto unlock; 6722 6723 case STOP_ARRAY_RO: 6724 err = md_set_readonly(mddev, bdev); 6725 goto unlock; 6726 6727 case HOT_REMOVE_DISK: 6728 err = hot_remove_disk(mddev, new_decode_dev(arg)); 6729 goto unlock; 6730 6731 case ADD_NEW_DISK: 6732 /* We can support ADD_NEW_DISK on read-only arrays 6733 * only if we are re-adding a preexisting device. 6734 * So require mddev->pers and MD_DISK_SYNC. 6735 */ 6736 if (mddev->pers) { 6737 mdu_disk_info_t info; 6738 if (copy_from_user(&info, argp, sizeof(info))) 6739 err = -EFAULT; 6740 else if (!(info.state & (1<<MD_DISK_SYNC))) 6741 /* Need to clear read-only for this */ 6742 break; 6743 else 6744 err = add_new_disk(mddev, &info); 6745 goto unlock; 6746 } 6747 break; 6748 6749 case BLKROSET: 6750 if (get_user(ro, (int __user *)(arg))) { 6751 err = -EFAULT; 6752 goto unlock; 6753 } 6754 err = -EINVAL; 6755 6756 /* if the bdev is going readonly the value of mddev->ro 6757 * does not matter, no writes are coming 6758 */ 6759 if (ro) 6760 goto unlock; 6761 6762 /* are we already prepared for writes? */ 6763 if (mddev->ro != 1) 6764 goto unlock; 6765 6766 /* transitioning to readauto need only happen for 6767 * arrays that call md_write_start 6768 */ 6769 if (mddev->pers) { 6770 err = restart_array(mddev); 6771 if (err == 0) { 6772 mddev->ro = 2; 6773 set_disk_ro(mddev->gendisk, 0); 6774 } 6775 } 6776 goto unlock; 6777 } 6778 6779 /* 6780 * The remaining ioctls are changing the state of the 6781 * superblock, so we do not allow them on read-only arrays. 6782 */ 6783 if (mddev->ro && mddev->pers) { 6784 if (mddev->ro == 2) { 6785 mddev->ro = 0; 6786 sysfs_notify_dirent_safe(mddev->sysfs_state); 6787 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6788 /* mddev_unlock will wake thread */ 6789 /* If a device failed while we were read-only, we 6790 * need to make sure the metadata is updated now.
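* We drop the mddev lock while waiting so that the md thread can actually write the superblock and clear MD_CHANGE_DEVS/MD_CHANGE_PENDING.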
6791 */ 6792 if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 6793 mddev_unlock(mddev); 6794 wait_event(mddev->sb_wait, 6795 !test_bit(MD_CHANGE_DEVS, &mddev->flags) && 6796 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6797 mddev_lock_nointr(mddev); 6798 } 6799 } else { 6800 err = -EROFS; 6801 goto unlock; 6802 } 6803 } 6804 6805 switch (cmd) { 6806 case ADD_NEW_DISK: 6807 { 6808 mdu_disk_info_t info; 6809 if (copy_from_user(&info, argp, sizeof(info))) 6810 err = -EFAULT; 6811 else 6812 err = add_new_disk(mddev, &info); 6813 goto unlock; 6814 } 6815 6816 case CLUSTERED_DISK_NACK: 6817 if (mddev_is_clustered(mddev)) 6818 md_cluster_ops->new_disk_ack(mddev, false); 6819 else 6820 err = -EINVAL; 6821 goto unlock; 6822 6823 case HOT_ADD_DISK: 6824 err = hot_add_disk(mddev, new_decode_dev(arg)); 6825 goto unlock; 6826 6827 case RUN_ARRAY: 6828 err = do_md_run(mddev); 6829 goto unlock; 6830 6831 case SET_BITMAP_FILE: 6832 err = set_bitmap_file(mddev, (int)arg); 6833 goto unlock; 6834 6835 default: 6836 err = -EINVAL; 6837 goto unlock; 6838 } 6839 6840 unlock: 6841 if (mddev->hold_active == UNTIL_IOCTL && 6842 err != -EINVAL) 6843 mddev->hold_active = 0; 6844 mddev_unlock(mddev); 6845 out: 6846 return err; 6847 } 6848 #ifdef CONFIG_COMPAT 6849 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 6850 unsigned int cmd, unsigned long arg) 6851 { 6852 switch (cmd) { 6853 case HOT_REMOVE_DISK: 6854 case HOT_ADD_DISK: 6855 case SET_DISK_FAULTY: 6856 case SET_BITMAP_FILE: 6857 /* These take an integer arg, do not convert */ 6858 break; 6859 default: 6860 arg = (unsigned long)compat_ptr(arg); 6861 break; 6862 } 6863 6864 return md_ioctl(bdev, mode, cmd, arg); 6865 } 6866 #endif /* CONFIG_COMPAT */ 6867 6868 static int md_open(struct block_device *bdev, fmode_t mode) 6869 { 6870 /* 6871 * Succeed if we can lock the mddev, which confirms that 6872 * it isn't being stopped right now. 6873 */ 6874 struct mddev *mddev = mddev_find(bdev->bd_dev); 6875 int err; 6876 6877 if (!mddev) 6878 return -ENODEV; 6879 6880 if (mddev->gendisk != bdev->bd_disk) { 6881 /* we are racing with mddev_put which is discarding this 6882 * bd_disk.
6883 */ 6884 mddev_put(mddev); 6885 /* Wait until bdev->bd_disk is definitely gone */ 6886 flush_workqueue(md_misc_wq); 6887 /* Then retry the open from the top */ 6888 return -ERESTARTSYS; 6889 } 6890 BUG_ON(mddev != bdev->bd_disk->private_data); 6891 6892 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 6893 goto out; 6894 6895 err = 0; 6896 atomic_inc(&mddev->openers); 6897 clear_bit(MD_STILL_CLOSED, &mddev->flags); 6898 mutex_unlock(&mddev->open_mutex); 6899 6900 check_disk_change(bdev); 6901 out: 6902 return err; 6903 } 6904 6905 static void md_release(struct gendisk *disk, fmode_t mode) 6906 { 6907 struct mddev *mddev = disk->private_data; 6908 6909 BUG_ON(!mddev); 6910 atomic_dec(&mddev->openers); 6911 mddev_put(mddev); 6912 } 6913 6914 static int md_media_changed(struct gendisk *disk) 6915 { 6916 struct mddev *mddev = disk->private_data; 6917 6918 return mddev->changed; 6919 } 6920 6921 static int md_revalidate(struct gendisk *disk) 6922 { 6923 struct mddev *mddev = disk->private_data; 6924 6925 mddev->changed = 0; 6926 return 0; 6927 } 6928 static const struct block_device_operations md_fops = 6929 { 6930 .owner = THIS_MODULE, 6931 .open = md_open, 6932 .release = md_release, 6933 .ioctl = md_ioctl, 6934 #ifdef CONFIG_COMPAT 6935 .compat_ioctl = md_compat_ioctl, 6936 #endif 6937 .getgeo = md_getgeo, 6938 .media_changed = md_media_changed, 6939 .revalidate_disk= md_revalidate, 6940 }; 6941 6942 static int md_thread(void *arg) 6943 { 6944 struct md_thread *thread = arg; 6945 6946 /* 6947 * md_thread is a 'system-thread', its priority should be very 6948 * high. We avoid resource deadlocks individually in each 6949 * raid personality. (RAID5 does preallocation) We also use RR and 6950 * the very same RT priority as kswapd, thus we will never get 6951 * into a priority inversion deadlock. 6952 * 6953 * we definitely have to have equal or higher priority than 6954 * bdflush, otherwise bdflush will deadlock if there are too 6955 * many dirty RAID5 blocks. 6956 */ 6957 6958 allow_signal(SIGKILL); 6959 while (!kthread_should_stop()) { 6960 6961 /* We need to wait INTERRUPTIBLE so that 6962 * we don't add to the load-average.
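* (only uninterruptible sleeps are counted into the load average; TASK_INTERRUPTIBLE sleeps are not)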
6963 * That means we need to be sure no signals are 6964 * pending 6965 */ 6966 if (signal_pending(current)) 6967 flush_signals(current); 6968 6969 wait_event_interruptible_timeout 6970 (thread->wqueue, 6971 test_bit(THREAD_WAKEUP, &thread->flags) 6972 || kthread_should_stop(), 6973 thread->timeout); 6974 6975 clear_bit(THREAD_WAKEUP, &thread->flags); 6976 if (!kthread_should_stop()) 6977 thread->run(thread); 6978 } 6979 6980 return 0; 6981 } 6982 6983 void md_wakeup_thread(struct md_thread *thread) 6984 { 6985 if (thread) { 6986 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 6987 set_bit(THREAD_WAKEUP, &thread->flags); 6988 wake_up(&thread->wqueue); 6989 } 6990 } 6991 EXPORT_SYMBOL(md_wakeup_thread); 6992 6993 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 6994 struct mddev *mddev, const char *name) 6995 { 6996 struct md_thread *thread; 6997 6998 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 6999 if (!thread) 7000 return NULL; 7001 7002 init_waitqueue_head(&thread->wqueue); 7003 7004 thread->run = run; 7005 thread->mddev = mddev; 7006 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7007 thread->tsk = kthread_run(md_thread, thread, 7008 "%s_%s", 7009 mdname(thread->mddev), 7010 name); 7011 if (IS_ERR(thread->tsk)) { 7012 kfree(thread); 7013 return NULL; 7014 } 7015 return thread; 7016 } 7017 EXPORT_SYMBOL(md_register_thread); 7018 7019 void md_unregister_thread(struct md_thread **threadp) 7020 { 7021 struct md_thread *thread = *threadp; 7022 if (!thread) 7023 return; 7024 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7025 /* Locking ensures that mddev_unlock does not wake_up a 7026 * non-existent thread 7027 */ 7028 spin_lock(&pers_lock); 7029 *threadp = NULL; 7030 spin_unlock(&pers_lock); 7031 7032 kthread_stop(thread->tsk); 7033 kfree(thread); 7034 } 7035 EXPORT_SYMBOL(md_unregister_thread); 7036 7037 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7038 { 7039 if (!rdev || test_bit(Faulty, &rdev->flags)) 7040 return; 7041 7042 if (!mddev->pers || !mddev->pers->error_handler) 7043 return; 7044 mddev->pers->error_handler(mddev,rdev); 7045 if (mddev->degraded) 7046 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7047 sysfs_notify_dirent_safe(rdev->sysfs_state); 7048 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7049 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7050 md_wakeup_thread(mddev->thread); 7051 if (mddev->event_work.func) 7052 queue_work(md_misc_wq, &mddev->event_work); 7053 md_new_event_inintr(mddev); 7054 } 7055 EXPORT_SYMBOL(md_error); 7056 7057 /* seq_file implementation /proc/mdstat */ 7058 7059 static void status_unused(struct seq_file *seq) 7060 { 7061 int i = 0; 7062 struct md_rdev *rdev; 7063 7064 seq_printf(seq, "unused devices: "); 7065 7066 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7067 char b[BDEVNAME_SIZE]; 7068 i++; 7069 seq_printf(seq, "%s ", 7070 bdevname(rdev->bdev,b)); 7071 } 7072 if (!i) 7073 seq_printf(seq, "<none>"); 7074 7075 seq_printf(seq, "\n"); 7076 } 7077 7078 static int status_resync(struct seq_file *seq, struct mddev *mddev) 7079 { 7080 sector_t max_sectors, resync, res; 7081 unsigned long dt, db; 7082 sector_t rt; 7083 int scale; 7084 unsigned int per_milli; 7085 7086 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 7087 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7088 max_sectors = mddev->resync_max_sectors; 7089 else 7090 max_sectors = mddev->dev_sectors; 7091 7092 resync = mddev->curr_resync; 7093 if (resync <= 3) { 7094 if 
(test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7095 /* Still cleaning up */ 7096 resync = max_sectors; 7097 } else 7098 resync -= atomic_read(&mddev->recovery_active); 7099 7100 if (resync == 0) { 7101 if (mddev->recovery_cp < MaxSector) { 7102 seq_printf(seq, "\tresync=PENDING"); 7103 return 1; 7104 } 7105 return 0; 7106 } 7107 if (resync < 3) { 7108 seq_printf(seq, "\tresync=DELAYED"); 7109 return 1; 7110 } 7111 7112 WARN_ON(max_sectors == 0); 7113 /* Pick 'scale' such that (resync>>scale)*1000 will fit 7114 * in a sector_t, and (max_sectors>>scale) will fit in a 7115 * u32, as those are the requirements for sector_div. 7116 * Thus 'scale' must be at least 10 7117 */ 7118 scale = 10; 7119 if (sizeof(sector_t) > sizeof(unsigned long)) { 7120 while ( max_sectors/2 > (1ULL<<(scale+32))) 7121 scale++; 7122 } 7123 res = (resync>>scale)*1000; 7124 sector_div(res, (u32)((max_sectors>>scale)+1)); 7125 7126 per_milli = res; 7127 { 7128 int i, x = per_milli/50, y = 20-x; 7129 seq_printf(seq, "["); 7130 for (i = 0; i < x; i++) 7131 seq_printf(seq, "="); 7132 seq_printf(seq, ">"); 7133 for (i = 0; i < y; i++) 7134 seq_printf(seq, "."); 7135 seq_printf(seq, "] "); 7136 } 7137 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 7138 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 7139 "reshape" : 7140 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 7141 "check" : 7142 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 7143 "resync" : "recovery"))), 7144 per_milli/10, per_milli % 10, 7145 (unsigned long long) resync/2, 7146 (unsigned long long) max_sectors/2); 7147 7148 /* 7149 * dt: time from mark until now 7150 * db: blocks written from mark until now 7151 * rt: remaining time 7152 * 7153 * rt is a sector_t, so could be 32bit or 64bit. 7154 * So we divide before multiply in case it is 32bit and close 7155 * to the limit. 7156 * We scale the divisor (db) by 32 to avoid losing precision 7157 * near the end of resync when the number of remaining sectors 7158 * is close to 'db'. 7159 * We then divide rt by 32 after multiplying by db to compensate. 7160 * The '+1' avoids division by zero if db is very small. 
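* Net effect: rt ~= remaining * dt / db, i.e. remaining sectors divided by the recent rate; e.g. 1000000 sectors left at db/dt = 2000 sectors/sec gives rt ~= 500 seconds.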
7161 */ 7162 dt = ((jiffies - mddev->resync_mark) / HZ); 7163 if (!dt) dt++; 7164 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 7165 - mddev->resync_mark_cnt; 7166 7167 rt = max_sectors - resync; /* number of remaining sectors */ 7168 sector_div(rt, db/32+1); 7169 rt *= dt; 7170 rt >>= 5; 7171 7172 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 7173 ((unsigned long)rt % 60)/6); 7174 7175 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 7176 return 1; 7177 } 7178 7179 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 7180 { 7181 struct list_head *tmp; 7182 loff_t l = *pos; 7183 struct mddev *mddev; 7184 7185 if (l >= 0x10000) 7186 return NULL; 7187 if (!l--) 7188 /* header */ 7189 return (void*)1; 7190 7191 spin_lock(&all_mddevs_lock); 7192 list_for_each(tmp,&all_mddevs) 7193 if (!l--) { 7194 mddev = list_entry(tmp, struct mddev, all_mddevs); 7195 mddev_get(mddev); 7196 spin_unlock(&all_mddevs_lock); 7197 return mddev; 7198 } 7199 spin_unlock(&all_mddevs_lock); 7200 if (!l--) 7201 return (void*)2;/* tail */ 7202 return NULL; 7203 } 7204 7205 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 7206 { 7207 struct list_head *tmp; 7208 struct mddev *next_mddev, *mddev = v; 7209 7210 ++*pos; 7211 if (v == (void*)2) 7212 return NULL; 7213 7214 spin_lock(&all_mddevs_lock); 7215 if (v == (void*)1) 7216 tmp = all_mddevs.next; 7217 else 7218 tmp = mddev->all_mddevs.next; 7219 if (tmp != &all_mddevs) 7220 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 7221 else { 7222 next_mddev = (void*)2; 7223 *pos = 0x10000; 7224 } 7225 spin_unlock(&all_mddevs_lock); 7226 7227 if (v != (void*)1) 7228 mddev_put(mddev); 7229 return next_mddev; 7230 7231 } 7232 7233 static void md_seq_stop(struct seq_file *seq, void *v) 7234 { 7235 struct mddev *mddev = v; 7236 7237 if (mddev && v != (void*)1 && v != (void*)2) 7238 mddev_put(mddev); 7239 } 7240 7241 static int md_seq_show(struct seq_file *seq, void *v) 7242 { 7243 struct mddev *mddev = v; 7244 sector_t sectors; 7245 struct md_rdev *rdev; 7246 7247 if (v == (void*)1) { 7248 struct md_personality *pers; 7249 seq_printf(seq, "Personalities : "); 7250 spin_lock(&pers_lock); 7251 list_for_each_entry(pers, &pers_list, list) 7252 seq_printf(seq, "[%s] ", pers->name); 7253 7254 spin_unlock(&pers_lock); 7255 seq_printf(seq, "\n"); 7256 seq->poll_event = atomic_read(&md_event_count); 7257 return 0; 7258 } 7259 if (v == (void*)2) { 7260 status_unused(seq); 7261 return 0; 7262 } 7263 7264 spin_lock(&mddev->lock); 7265 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 7266 seq_printf(seq, "%s : %sactive", mdname(mddev), 7267 mddev->pers ? 
"" : "in"); 7268 if (mddev->pers) { 7269 if (mddev->ro==1) 7270 seq_printf(seq, " (read-only)"); 7271 if (mddev->ro==2) 7272 seq_printf(seq, " (auto-read-only)"); 7273 seq_printf(seq, " %s", mddev->pers->name); 7274 } 7275 7276 sectors = 0; 7277 rcu_read_lock(); 7278 rdev_for_each_rcu(rdev, mddev) { 7279 char b[BDEVNAME_SIZE]; 7280 seq_printf(seq, " %s[%d]", 7281 bdevname(rdev->bdev,b), rdev->desc_nr); 7282 if (test_bit(WriteMostly, &rdev->flags)) 7283 seq_printf(seq, "(W)"); 7284 if (test_bit(Faulty, &rdev->flags)) { 7285 seq_printf(seq, "(F)"); 7286 continue; 7287 } 7288 if (rdev->raid_disk < 0) 7289 seq_printf(seq, "(S)"); /* spare */ 7290 if (test_bit(Replacement, &rdev->flags)) 7291 seq_printf(seq, "(R)"); 7292 sectors += rdev->sectors; 7293 } 7294 rcu_read_unlock(); 7295 7296 if (!list_empty(&mddev->disks)) { 7297 if (mddev->pers) 7298 seq_printf(seq, "\n %llu blocks", 7299 (unsigned long long) 7300 mddev->array_sectors / 2); 7301 else 7302 seq_printf(seq, "\n %llu blocks", 7303 (unsigned long long)sectors / 2); 7304 } 7305 if (mddev->persistent) { 7306 if (mddev->major_version != 0 || 7307 mddev->minor_version != 90) { 7308 seq_printf(seq," super %d.%d", 7309 mddev->major_version, 7310 mddev->minor_version); 7311 } 7312 } else if (mddev->external) 7313 seq_printf(seq, " super external:%s", 7314 mddev->metadata_type); 7315 else 7316 seq_printf(seq, " super non-persistent"); 7317 7318 if (mddev->pers) { 7319 mddev->pers->status(seq, mddev); 7320 seq_printf(seq, "\n "); 7321 if (mddev->pers->sync_request) { 7322 if (status_resync(seq, mddev)) 7323 seq_printf(seq, "\n "); 7324 } 7325 } else 7326 seq_printf(seq, "\n "); 7327 7328 bitmap_status(seq, mddev->bitmap); 7329 7330 seq_printf(seq, "\n"); 7331 } 7332 spin_unlock(&mddev->lock); 7333 7334 return 0; 7335 } 7336 7337 static const struct seq_operations md_seq_ops = { 7338 .start = md_seq_start, 7339 .next = md_seq_next, 7340 .stop = md_seq_stop, 7341 .show = md_seq_show, 7342 }; 7343 7344 static int md_seq_open(struct inode *inode, struct file *file) 7345 { 7346 struct seq_file *seq; 7347 int error; 7348 7349 error = seq_open(file, &md_seq_ops); 7350 if (error) 7351 return error; 7352 7353 seq = file->private_data; 7354 seq->poll_event = atomic_read(&md_event_count); 7355 return error; 7356 } 7357 7358 static int md_unloading; 7359 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 7360 { 7361 struct seq_file *seq = filp->private_data; 7362 int mask; 7363 7364 if (md_unloading) 7365 return POLLIN|POLLRDNORM|POLLERR|POLLPRI; 7366 poll_wait(filp, &md_event_waiters, wait); 7367 7368 /* always allow read */ 7369 mask = POLLIN | POLLRDNORM; 7370 7371 if (seq->poll_event != atomic_read(&md_event_count)) 7372 mask |= POLLERR | POLLPRI; 7373 return mask; 7374 } 7375 7376 static const struct file_operations md_seq_fops = { 7377 .owner = THIS_MODULE, 7378 .open = md_seq_open, 7379 .read = seq_read, 7380 .llseek = seq_lseek, 7381 .release = seq_release_private, 7382 .poll = mdstat_poll, 7383 }; 7384 7385 int register_md_personality(struct md_personality *p) 7386 { 7387 printk(KERN_INFO "md: %s personality registered for level %d\n", 7388 p->name, p->level); 7389 spin_lock(&pers_lock); 7390 list_add_tail(&p->list, &pers_list); 7391 spin_unlock(&pers_lock); 7392 return 0; 7393 } 7394 EXPORT_SYMBOL(register_md_personality); 7395 7396 int unregister_md_personality(struct md_personality *p) 7397 { 7398 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 7399 spin_lock(&pers_lock); 7400 list_del_init(&p->list); 7401 
spin_unlock(&pers_lock); 7402 return 0; 7403 } 7404 EXPORT_SYMBOL(unregister_md_personality); 7405 7406 int register_md_cluster_operations(struct md_cluster_operations *ops, 7407 struct module *module) 7408 { 7409 int ret = 0; 7410 spin_lock(&pers_lock); 7411 if (md_cluster_ops != NULL) 7412 ret = -EALREADY; 7413 else { 7414 md_cluster_ops = ops; 7415 md_cluster_mod = module; 7416 } 7417 spin_unlock(&pers_lock); 7418 return ret; 7419 } 7420 EXPORT_SYMBOL(register_md_cluster_operations); 7421 7422 int unregister_md_cluster_operations(void) 7423 { 7424 spin_lock(&pers_lock); 7425 md_cluster_ops = NULL; 7426 spin_unlock(&pers_lock); 7427 return 0; 7428 } 7429 EXPORT_SYMBOL(unregister_md_cluster_operations); 7430 7431 int md_setup_cluster(struct mddev *mddev, int nodes) 7432 { 7433 int err; 7434 7435 err = request_module("md-cluster"); 7436 if (err) { 7437 pr_err("md-cluster module not found.\n"); 7438 return -ENOENT; 7439 } 7440 7441 spin_lock(&pers_lock); 7442 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 7443 spin_unlock(&pers_lock); 7444 return -ENOENT; 7445 } 7446 spin_unlock(&pers_lock); 7447 7448 return md_cluster_ops->join(mddev, nodes); 7449 } 7450 7451 void md_cluster_stop(struct mddev *mddev) 7452 { 7453 if (!md_cluster_ops) 7454 return; 7455 md_cluster_ops->leave(mddev); 7456 module_put(md_cluster_mod); 7457 } 7458 7459 static int is_mddev_idle(struct mddev *mddev, int init) 7460 { 7461 struct md_rdev *rdev; 7462 int idle; 7463 int curr_events; 7464 7465 idle = 1; 7466 rcu_read_lock(); 7467 rdev_for_each_rcu(rdev, mddev) { 7468 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 7469 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 7470 (int)part_stat_read(&disk->part0, sectors[1]) - 7471 atomic_read(&disk->sync_io); 7472 /* sync IO will cause sync_io to increase before the disk_stats 7473 * as sync_io is counted when a request starts, and 7474 * disk_stats is counted when it completes. 7475 * So resync activity will cause curr_events to be smaller than 7476 * when there was no such activity. 7477 * non-sync IO will cause disk_stat to increase without 7478 * increasing sync_io so curr_events will (eventually) 7479 * be larger than it was before. Once it becomes 7480 * substantially larger, the test below will cause 7481 * the array to appear non-idle, and resync will slow 7482 * down. 7483 * If there is a lot of outstanding resync activity when 7484 * we set last_event to curr_events, then all that activity 7485 * completing might cause the array to appear non-idle 7486 * and resync will be slowed down even though there might 7487 * not have been non-resync activity. This will only 7488 * happen once though. 'last_events' will soon reflect 7489 * the state where there is little or no outstanding 7490 * resync requests, and further resync activity will 7491 * always make curr_events less than last_events. 7492 * 7493 */ 7494 if (init || curr_events - rdev->last_events > 64) { 7495 rdev->last_events = curr_events; 7496 idle = 0; 7497 } 7498 } 7499 rcu_read_unlock(); 7500 return idle; 7501 } 7502 7503 void md_done_sync(struct mddev *mddev, int blocks, int ok) 7504 { 7505 /* another "blocks" (512byte) blocks have been synced */ 7506 atomic_sub(blocks, &mddev->recovery_active); 7507 wake_up(&mddev->recovery_wait); 7508 if (!ok) { 7509 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7510 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 7511 md_wakeup_thread(mddev->thread); 7512 // stop recovery, signal do_sync .... 
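// (md_do_sync checks MD_RECOVERY_INTR at each pass of its main loop and breaks out once it is set)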
7513 } 7514 } 7515 EXPORT_SYMBOL(md_done_sync); 7516 7517 /* md_write_start(mddev, bi) 7518 * If we need to update some array metadata (e.g. 'active' flag 7519 * in superblock) before writing, schedule a superblock update 7520 * and wait for it to complete. 7521 */ 7522 void md_write_start(struct mddev *mddev, struct bio *bi) 7523 { 7524 int did_change = 0; 7525 if (bio_data_dir(bi) != WRITE) 7526 return; 7527 7528 BUG_ON(mddev->ro == 1); 7529 if (mddev->ro == 2) { 7530 /* need to switch to read/write */ 7531 mddev->ro = 0; 7532 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7533 md_wakeup_thread(mddev->thread); 7534 md_wakeup_thread(mddev->sync_thread); 7535 did_change = 1; 7536 } 7537 atomic_inc(&mddev->writes_pending); 7538 if (mddev->safemode == 1) 7539 mddev->safemode = 0; 7540 if (mddev->in_sync) { 7541 spin_lock(&mddev->lock); 7542 if (mddev->in_sync) { 7543 mddev->in_sync = 0; 7544 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7545 set_bit(MD_CHANGE_PENDING, &mddev->flags); 7546 md_wakeup_thread(mddev->thread); 7547 did_change = 1; 7548 } 7549 spin_unlock(&mddev->lock); 7550 } 7551 if (did_change) 7552 sysfs_notify_dirent_safe(mddev->sysfs_state); 7553 wait_event(mddev->sb_wait, 7554 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 7555 } 7556 EXPORT_SYMBOL(md_write_start); 7557 7558 void md_write_end(struct mddev *mddev) 7559 { 7560 if (atomic_dec_and_test(&mddev->writes_pending)) { 7561 if (mddev->safemode == 2) 7562 md_wakeup_thread(mddev->thread); 7563 else if (mddev->safemode_delay) 7564 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 7565 } 7566 } 7567 EXPORT_SYMBOL(md_write_end); 7568 7569 /* md_allow_write(mddev) 7570 * Calling this ensures that the array is marked 'active' so that writes 7571 * may proceed without blocking. It is important to call this before 7572 * attempting a GFP_KERNEL allocation while holding the mddev lock. 7573 * Must be called with mddev_lock held. 7574 * 7575 * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock 7576 * is dropped, so return -EAGAIN after notifying userspace.
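* A typical call pattern is (a sketch, not a specific caller):
*	md_allow_write(mddev);
*	ptr = kmalloc(size, GFP_KERNEL);
* so the allocation only happens once the array is marked active.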
7577 */ 7578 int md_allow_write(struct mddev *mddev) 7579 { 7580 if (!mddev->pers) 7581 return 0; 7582 if (mddev->ro) 7583 return 0; 7584 if (!mddev->pers->sync_request) 7585 return 0; 7586 7587 spin_lock(&mddev->lock); 7588 if (mddev->in_sync) { 7589 mddev->in_sync = 0; 7590 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7591 set_bit(MD_CHANGE_PENDING, &mddev->flags); 7592 if (mddev->safemode_delay && 7593 mddev->safemode == 0) 7594 mddev->safemode = 1; 7595 spin_unlock(&mddev->lock); 7596 if (mddev_is_clustered(mddev)) 7597 md_cluster_ops->metadata_update_start(mddev); 7598 md_update_sb(mddev, 0); 7599 if (mddev_is_clustered(mddev)) 7600 md_cluster_ops->metadata_update_finish(mddev); 7601 sysfs_notify_dirent_safe(mddev->sysfs_state); 7602 } else 7603 spin_unlock(&mddev->lock); 7604 7605 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 7606 return -EAGAIN; 7607 else 7608 return 0; 7609 } 7610 EXPORT_SYMBOL_GPL(md_allow_write); 7611 7612 #define SYNC_MARKS 10 7613 #define SYNC_MARK_STEP (3*HZ) 7614 #define UPDATE_FREQUENCY (5*60*HZ) 7615 void md_do_sync(struct md_thread *thread) 7616 { 7617 struct mddev *mddev = thread->mddev; 7618 struct mddev *mddev2; 7619 unsigned int currspeed = 0, 7620 window; 7621 sector_t max_sectors,j, io_sectors, recovery_done; 7622 unsigned long mark[SYNC_MARKS]; 7623 unsigned long update_time; 7624 sector_t mark_cnt[SYNC_MARKS]; 7625 int last_mark,m; 7626 struct list_head *tmp; 7627 sector_t last_check; 7628 int skipped = 0; 7629 struct md_rdev *rdev; 7630 char *desc, *action = NULL; 7631 struct blk_plug plug; 7632 7633 /* just in case the thread restarts... */ 7634 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7635 return; 7636 if (mddev->ro) {/* never try to sync a read-only array */ 7637 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7638 return; 7639 } 7640 7641 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7642 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 7643 desc = "data-check"; 7644 action = "check"; 7645 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7646 desc = "requested-resync"; 7647 action = "repair"; 7648 } else 7649 desc = "resync"; 7650 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7651 desc = "reshape"; 7652 else 7653 desc = "recovery"; 7654 7655 mddev->last_sync_action = action ?: desc; 7656 7657 /* we overload curr_resync somewhat here. 7658 * 0 == not engaged in resync at all 7659 * 2 == checking that there is no conflict with another sync 7660 * 1 == like 2, but have yielded to allow conflicting resync to 7661 * commence 7662 * other == active in resync - this many blocks 7663 * 7664 * Before starting a resync we must have set curr_resync to 7665 * 2, and then checked that every "conflicting" array has curr_resync 7666 * less than ours. When we find one that is the same or higher 7667 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 7668 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 7669 * This will mean we have to start checking from the beginning again.
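* (ties are broken arbitrarily but consistently by comparing mddev pointer addresses: the lower address is the one that yields)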
7670 * 7671 */ 7672 7673 do { 7674 mddev->curr_resync = 2; 7675 7676 try_again: 7677 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7678 goto skip; 7679 for_each_mddev(mddev2, tmp) { 7680 if (mddev2 == mddev) 7681 continue; 7682 if (!mddev->parallel_resync 7683 && mddev2->curr_resync 7684 && match_mddev_units(mddev, mddev2)) { 7685 DEFINE_WAIT(wq); 7686 if (mddev < mddev2 && mddev->curr_resync == 2) { 7687 /* arbitrarily yield */ 7688 mddev->curr_resync = 1; 7689 wake_up(&resync_wait); 7690 } 7691 if (mddev > mddev2 && mddev->curr_resync == 1) 7692 /* no need to wait here, we can wait the next 7693 * time 'round when curr_resync == 2 7694 */ 7695 continue; 7696 /* We need to wait 'interruptible' so as not to 7697 * contribute to the load average, and not to 7698 * be caught by 'softlockup' 7699 */ 7700 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7701 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7702 mddev2->curr_resync >= mddev->curr_resync) { 7703 printk(KERN_INFO "md: delaying %s of %s" 7704 " until %s has finished (they" 7705 " share one or more physical units)\n", 7706 desc, mdname(mddev), mdname(mddev2)); 7707 mddev_put(mddev2); 7708 if (signal_pending(current)) 7709 flush_signals(current); 7710 schedule(); 7711 finish_wait(&resync_wait, &wq); 7712 goto try_again; 7713 } 7714 finish_wait(&resync_wait, &wq); 7715 } 7716 } 7717 } while (mddev->curr_resync < 2); 7718 7719 j = 0; 7720 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7721 /* resync follows the size requested by the personality, 7722 * which defaults to physical size, but can be virtual size 7723 */ 7724 max_sectors = mddev->resync_max_sectors; 7725 atomic64_set(&mddev->resync_mismatches, 0); 7726 /* we don't use the checkpoint if there's a bitmap */ 7727 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7728 j = mddev->resync_min; 7729 else if (!mddev->bitmap) 7730 j = mddev->recovery_cp; 7731 7732 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7733 max_sectors = mddev->resync_max_sectors; 7734 else { 7735 /* recovery follows the physical size of devices */ 7736 max_sectors = mddev->dev_sectors; 7737 j = MaxSector; 7738 rcu_read_lock(); 7739 rdev_for_each_rcu(rdev, mddev) 7740 if (rdev->raid_disk >= 0 && 7741 !test_bit(Faulty, &rdev->flags) && 7742 !test_bit(In_sync, &rdev->flags) && 7743 rdev->recovery_offset < j) 7744 j = rdev->recovery_offset; 7745 rcu_read_unlock(); 7746 7747 /* If there is a bitmap, we need to make sure all 7748 * writes that started before we added a spare 7749 * complete before we start doing a recovery. 7750 * Otherwise the write might complete and (via 7751 * bitmap_endwrite) set a bit in the bitmap after the 7752 * recovery has checked that bit and skipped that 7753 * region. 
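* The quiesce(1)/quiesce(0) pair just below waits for those in-flight writes to drain before recovery starts.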
7754 */ 7755 if (mddev->bitmap) { 7756 mddev->pers->quiesce(mddev, 1); 7757 mddev->pers->quiesce(mddev, 0); 7758 } 7759 } 7760 7761 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7762 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 7763 " %d KB/sec/disk.\n", speed_min(mddev)); 7764 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 7765 "(but not more than %d KB/sec) for %s.\n", 7766 speed_max(mddev), desc); 7767 7768 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 7769 7770 io_sectors = 0; 7771 for (m = 0; m < SYNC_MARKS; m++) { 7772 mark[m] = jiffies; 7773 mark_cnt[m] = io_sectors; 7774 } 7775 last_mark = 0; 7776 mddev->resync_mark = mark[last_mark]; 7777 mddev->resync_mark_cnt = mark_cnt[last_mark]; 7778 7779 /* 7780 * Tune reconstruction: 7781 */ 7782 window = 32*(PAGE_SIZE/512); 7783 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 7784 window/2, (unsigned long long)max_sectors/2); 7785 7786 atomic_set(&mddev->recovery_active, 0); 7787 last_check = 0; 7788 7789 if (j>2) { 7790 printk(KERN_INFO 7791 "md: resuming %s of %s from checkpoint.\n", 7792 desc, mdname(mddev)); 7793 mddev->curr_resync = j; 7794 } else 7795 mddev->curr_resync = 3; /* no longer delayed */ 7796 mddev->curr_resync_completed = j; 7797 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7798 md_new_event(mddev); 7799 update_time = jiffies; 7800 7801 if (mddev_is_clustered(mddev)) 7802 md_cluster_ops->resync_start(mddev, j, max_sectors); 7803 7804 blk_start_plug(&plug); 7805 while (j < max_sectors) { 7806 sector_t sectors; 7807 7808 skipped = 0; 7809 7810 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7811 ((mddev->curr_resync > mddev->curr_resync_completed && 7812 (mddev->curr_resync - mddev->curr_resync_completed) 7813 > (max_sectors >> 4)) || 7814 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 7815 (j - mddev->curr_resync_completed)*2 7816 >= mddev->resync_max - mddev->curr_resync_completed || 7817 mddev->curr_resync_completed > mddev->resync_max 7818 )) { 7819 /* time to update curr_resync_completed */ 7820 wait_event(mddev->recovery_wait, 7821 atomic_read(&mddev->recovery_active) == 0); 7822 mddev->curr_resync_completed = j; 7823 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 7824 j > mddev->recovery_cp) 7825 mddev->recovery_cp = j; 7826 update_time = jiffies; 7827 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7828 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7829 } 7830 7831 while (j >= mddev->resync_max && 7832 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7833 /* As this condition is controlled by user-space, 7834 * we can block indefinitely, so use '_interruptible' 7835 * to avoid triggering warnings. 7836 */ 7837 flush_signals(current); /* just in case */ 7838 wait_event_interruptible(mddev->recovery_wait, 7839 mddev->resync_max > j 7840 || test_bit(MD_RECOVERY_INTR, 7841 &mddev->recovery)); 7842 } 7843 7844 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7845 break; 7846 7847 sectors = mddev->pers->sync_request(mddev, j, &skipped); 7848 if (sectors == 0) { 7849 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7850 break; 7851 } 7852 7853 if (!skipped) { /* actual IO requested */ 7854 io_sectors += sectors; 7855 atomic_add(sectors, &mddev->recovery_active); 7856 } 7857 7858 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7859 break; 7860 7861 j += sectors; 7862 if (j > max_sectors) 7863 /* when skipping, extra large numbers can be returned. 
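* (hence j is clamped to max_sectors just below)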
*/ 7864 j = max_sectors; 7865 if (j > 2) 7866 mddev->curr_resync = j; 7867 if (mddev_is_clustered(mddev)) 7868 md_cluster_ops->resync_info_update(mddev, j, max_sectors); 7869 mddev->curr_mark_cnt = io_sectors; 7870 if (last_check == 0) 7871 /* this is the earliest that rebuild will be 7872 * visible in /proc/mdstat 7873 */ 7874 md_new_event(mddev); 7875 7876 if (last_check + window > io_sectors || j == max_sectors) 7877 continue; 7878 7879 last_check = io_sectors; 7880 repeat: 7881 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 7882 /* step marks */ 7883 int next = (last_mark+1) % SYNC_MARKS; 7884 7885 mddev->resync_mark = mark[next]; 7886 mddev->resync_mark_cnt = mark_cnt[next]; 7887 mark[next] = jiffies; 7888 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 7889 last_mark = next; 7890 } 7891 7892 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7893 break; 7894 7895 /* 7896 * this loop exits only when either we are slower than 7897 * the 'hard' speed limit, or the system was IO-idle for 7898 * a jiffy. 7899 * the system might be non-idle CPU-wise, but we only care 7900 * about not overloading the IO subsystem. (things like an 7901 * e2fsck being done on the RAID array should execute fast) 7902 */ 7903 cond_resched(); 7904 7905 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 7906 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 7907 /((jiffies-mddev->resync_mark)/HZ +1) +1; 7908 7909 if (currspeed > speed_min(mddev)) { 7910 if (currspeed > speed_max(mddev)) { 7911 msleep(500); 7912 goto repeat; 7913 } 7914 if (!is_mddev_idle(mddev, 0)) { 7915 /* 7916 * Give other IO more of a chance. 7917 * The faster the devices, the less we wait. 7918 */ 7919 wait_event(mddev->recovery_wait, 7920 !atomic_read(&mddev->recovery_active)); 7921 } 7922 } 7923 } 7924 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, 7925 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 7926 ?
"interrupted" : "done"); 7927 /* 7928 * this also signals 'finished resyncing' to md_stop 7929 */ 7930 blk_finish_plug(&plug); 7931 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 7932 7933 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7934 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7935 mddev->curr_resync > 2) { 7936 mddev->curr_resync_completed = mddev->curr_resync; 7937 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7938 } 7939 /* tell personality that we are finished */ 7940 mddev->pers->sync_request(mddev, max_sectors, &skipped); 7941 7942 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 7943 mddev->curr_resync > 2) { 7944 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7945 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7946 if (mddev->curr_resync >= mddev->recovery_cp) { 7947 printk(KERN_INFO 7948 "md: checkpointing %s of %s.\n", 7949 desc, mdname(mddev)); 7950 if (test_bit(MD_RECOVERY_ERROR, 7951 &mddev->recovery)) 7952 mddev->recovery_cp = 7953 mddev->curr_resync_completed; 7954 else 7955 mddev->recovery_cp = 7956 mddev->curr_resync; 7957 } 7958 } else 7959 mddev->recovery_cp = MaxSector; 7960 } else { 7961 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7962 mddev->curr_resync = MaxSector; 7963 rcu_read_lock(); 7964 rdev_for_each_rcu(rdev, mddev) 7965 if (rdev->raid_disk >= 0 && 7966 mddev->delta_disks >= 0 && 7967 !test_bit(Faulty, &rdev->flags) && 7968 !test_bit(In_sync, &rdev->flags) && 7969 rdev->recovery_offset < mddev->curr_resync) 7970 rdev->recovery_offset = mddev->curr_resync; 7971 rcu_read_unlock(); 7972 } 7973 } 7974 skip: 7975 if (mddev_is_clustered(mddev)) 7976 md_cluster_ops->resync_finish(mddev); 7977 7978 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7979 7980 spin_lock(&mddev->lock); 7981 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7982 /* We completed so min/max setting can be forgotten if used. */ 7983 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7984 mddev->resync_min = 0; 7985 mddev->resync_max = MaxSector; 7986 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7987 mddev->resync_min = mddev->curr_resync_completed; 7988 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7989 mddev->curr_resync = 0; 7990 spin_unlock(&mddev->lock); 7991 7992 wake_up(&resync_wait); 7993 md_wakeup_thread(mddev->thread); 7994 return; 7995 } 7996 EXPORT_SYMBOL_GPL(md_do_sync); 7997 7998 static int remove_and_add_spares(struct mddev *mddev, 7999 struct md_rdev *this) 8000 { 8001 struct md_rdev *rdev; 8002 int spares = 0; 8003 int removed = 0; 8004 8005 rdev_for_each(rdev, mddev) 8006 if ((this == NULL || rdev == this) && 8007 rdev->raid_disk >= 0 && 8008 !test_bit(Blocked, &rdev->flags) && 8009 (test_bit(Faulty, &rdev->flags) || 8010 ! test_bit(In_sync, &rdev->flags)) && 8011 atomic_read(&rdev->nr_pending)==0) { 8012 if (mddev->pers->hot_remove_disk( 8013 mddev, rdev) == 0) { 8014 sysfs_unlink_rdev(mddev, rdev); 8015 rdev->raid_disk = -1; 8016 removed++; 8017 } 8018 } 8019 if (removed && mddev->kobj.sd) 8020 sysfs_notify(&mddev->kobj, NULL, "degraded"); 8021 8022 if (this) 8023 goto no_add; 8024 8025 rdev_for_each(rdev, mddev) { 8026 if (rdev->raid_disk >= 0 && 8027 !test_bit(In_sync, &rdev->flags) && 8028 !test_bit(Faulty, &rdev->flags)) 8029 spares++; 8030 if (rdev->raid_disk >= 0) 8031 continue; 8032 if (test_bit(Faulty, &rdev->flags)) 8033 continue; 8034 if (mddev->ro && 8035 ! 
		if (mddev->ro &&
		    !(rdev->saved_raid_disk >= 0 &&
		      !test_bit(Bitmap_sync, &rdev->flags)))
			continue;

		if (rdev->saved_raid_disk < 0)
			rdev->recovery_offset = 0;
		if (mddev->pers->
		    hot_add_disk(mddev, rdev) == 0) {
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;
			spares++;
			md_new_event(mddev);
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		}
	}
no_add:
	if (removed)
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
	return spares;
}

static void md_start_sync(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	mddev->sync_thread = md_register_thread(md_do_sync,
						mddev,
						"resync");
	if (!mddev->sync_thread) {
		printk(KERN_ERR "%s: could not start resync thread...\n",
		       mdname(mddev));
		/* leave the spares where they are, it shouldn't hurt */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		wake_up(&resync_wait);
		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
				       &mddev->recovery))
			if (mddev->sysfs_action)
				sysfs_notify_dirent_safe(mddev->sysfs_action);
	} else
		md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If the array is degraded, try to add spare devices.
 *  6/ If the array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(struct mddev *mddev)
{
	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if (!((mddev->flags & MD_UPDATE_SB_FLAGS & ~(1<<MD_CHANGE_PENDING)) ||
	      test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
	      test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
	      (mddev->external == 0 && mddev->safemode == 1) ||
	      (mddev->safemode == 2 &&
	       !atomic_read(&mddev->writes_pending) &&
	       !mddev->in_sync && mddev->recovery_cp == MaxSector)))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)
				/* 'Blocked' flag not needed as failed devices
				 * will be recorded if array switched to read/write.
				 * Leaving it set will prevent the device
				 * from being removed.
				 */
				rdev_for_each(rdev, mddev)
					clear_bit(Blocked, &rdev->flags);
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			remove_and_add_spares(mddev, NULL);
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock(&mddev->lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock(&mddev->lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags & MD_UPDATE_SB_FLAGS) {
			if (mddev_is_clustered(mddev))
				md_cluster_ops->metadata_update_start(mddev);
			md_update_sb(mddev, 0);
			if (mddev_is_clustered(mddev))
				md_cluster_ops->metadata_update_finish(mddev);
		}

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		mddev->curr_resync_completed = 0;
		spin_lock(&mddev->lock);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		spin_unlock(&mddev->lock);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto not_running;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */
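		/* Condensed from remove_and_add_spares() above, for
		 * illustration only:
		 *
		 *	if (mddev->pers->hot_remove_disk(mddev, rdev) == 0) {
		 *		sysfs_unlink_rdev(mddev, rdev);
		 *		rdev->raid_disk = -1;	// a spare again
		 *	}
		 *	...
		 *	if (mddev->pers->hot_add_disk(mddev, rdev) == 0)
		 *		sysfs_link_rdev(mddev, rdev);	// re-added
		 *
		 * so a personality vetoes a re-add simply by returning
		 * non-zero from hot_add_disk().
		 */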

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto not_running;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto not_running;

		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			INIT_WORK(&mddev->del_work, md_start_sync);
			queue_work(md_misc_wq, &mddev->del_work);
			goto unlock;
		}
	not_running:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			wake_up(&resync_wait);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
	unlock:
		wake_up(&mddev->sb_wait);
		mddev_unlock(mddev);
	}
}
EXPORT_SYMBOL(md_check_recovery);

void md_reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev)) {
			sysfs_notify(&mddev->kobj, NULL,
				     "degraded");
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		}
	}
	if (mddev_is_clustered(mddev))
		md_cluster_ops->metadata_update_start(mddev);
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape)
		mddev->pers->finish_reshape(mddev);

	/* If the array is no longer degraded, then any saved_raid_disk
	 * information must be scrapped.
	 */
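	/* For example (illustrative): a device that transiently failed
	 * out of slot 2 keeps saved_raid_disk == 2 as a hint so a re-add
	 * can put it straight back into its old slot.  Once nothing is
	 * missing from the array that hint is stale, hence the reset
	 * below.
	 */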
8296 */ 8297 if (!mddev->degraded) 8298 rdev_for_each(rdev, mddev) 8299 rdev->saved_raid_disk = -1; 8300 8301 md_update_sb(mddev, 1); 8302 if (mddev_is_clustered(mddev)) 8303 md_cluster_ops->metadata_update_finish(mddev); 8304 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8305 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 8306 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8307 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8308 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8309 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8310 wake_up(&resync_wait); 8311 /* flag recovery needed just to double check */ 8312 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8313 sysfs_notify_dirent_safe(mddev->sysfs_action); 8314 md_new_event(mddev); 8315 if (mddev->event_work.func) 8316 queue_work(md_misc_wq, &mddev->event_work); 8317 } 8318 EXPORT_SYMBOL(md_reap_sync_thread); 8319 8320 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 8321 { 8322 sysfs_notify_dirent_safe(rdev->sysfs_state); 8323 wait_event_timeout(rdev->blocked_wait, 8324 !test_bit(Blocked, &rdev->flags) && 8325 !test_bit(BlockedBadBlocks, &rdev->flags), 8326 msecs_to_jiffies(5000)); 8327 rdev_dec_pending(rdev, mddev); 8328 } 8329 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 8330 8331 void md_finish_reshape(struct mddev *mddev) 8332 { 8333 /* called be personality module when reshape completes. */ 8334 struct md_rdev *rdev; 8335 8336 rdev_for_each(rdev, mddev) { 8337 if (rdev->data_offset > rdev->new_data_offset) 8338 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 8339 else 8340 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 8341 rdev->data_offset = rdev->new_data_offset; 8342 } 8343 } 8344 EXPORT_SYMBOL(md_finish_reshape); 8345 8346 /* Bad block management. 8347 * We can record which blocks on each device are 'bad' and so just 8348 * fail those blocks, or that stripe, rather than the whole device. 8349 * Entries in the bad-block table are 64bits wide. This comprises: 8350 * Length of bad-range, in sectors: 0-511 for lengths 1-512 8351 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes) 8352 * A 'shift' can be set so that larger blocks are tracked and 8353 * consequently larger devices can be covered. 8354 * 'Acknowledged' flag - 1 bit. - the most significant bit. 8355 * 8356 * Locking of the bad-block table uses a seqlock so md_is_badblock 8357 * might need to retry if it is very unlucky. 8358 * We will sometimes want to check for bad blocks in a bi_end_io function, 8359 * so we use the write_seqlock_irq variant. 8360 * 8361 * When looking for a bad block we specify a range and want to 8362 * know if any block in the range is bad. So we binary-search 8363 * to the last range that starts at-or-before the given endpoint, 8364 * (or "before the sector after the target range") 8365 * then see if it ends after the given start. 8366 * We return 8367 * 0 if there are no known bad blocks in the range 8368 * 1 if there are known bad block which are all acknowledged 8369 * -1 if there are bad blocks which have not yet been acknowledged in metadata. 8370 * plus the start/length of the first bad section we overlap. 
8371 */ 8372 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, 8373 sector_t *first_bad, int *bad_sectors) 8374 { 8375 int hi; 8376 int lo; 8377 u64 *p = bb->page; 8378 int rv; 8379 sector_t target = s + sectors; 8380 unsigned seq; 8381 8382 if (bb->shift > 0) { 8383 /* round the start down, and the end up */ 8384 s >>= bb->shift; 8385 target += (1<<bb->shift) - 1; 8386 target >>= bb->shift; 8387 sectors = target - s; 8388 } 8389 /* 'target' is now the first block after the bad range */ 8390 8391 retry: 8392 seq = read_seqbegin(&bb->lock); 8393 lo = 0; 8394 rv = 0; 8395 hi = bb->count; 8396 8397 /* Binary search between lo and hi for 'target' 8398 * i.e. for the last range that starts before 'target' 8399 */ 8400 /* INVARIANT: ranges before 'lo' and at-or-after 'hi' 8401 * are known not to be the last range before target. 8402 * VARIANT: hi-lo is the number of possible 8403 * ranges, and decreases until it reaches 1 8404 */ 8405 while (hi - lo > 1) { 8406 int mid = (lo + hi) / 2; 8407 sector_t a = BB_OFFSET(p[mid]); 8408 if (a < target) 8409 /* This could still be the one, earlier ranges 8410 * could not. */ 8411 lo = mid; 8412 else 8413 /* This and later ranges are definitely out. */ 8414 hi = mid; 8415 } 8416 /* 'lo' might be the last that started before target, but 'hi' isn't */ 8417 if (hi > lo) { 8418 /* need to check all range that end after 's' to see if 8419 * any are unacknowledged. 8420 */ 8421 while (lo >= 0 && 8422 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) { 8423 if (BB_OFFSET(p[lo]) < target) { 8424 /* starts before the end, and finishes after 8425 * the start, so they must overlap 8426 */ 8427 if (rv != -1 && BB_ACK(p[lo])) 8428 rv = 1; 8429 else 8430 rv = -1; 8431 *first_bad = BB_OFFSET(p[lo]); 8432 *bad_sectors = BB_LEN(p[lo]); 8433 } 8434 lo--; 8435 } 8436 } 8437 8438 if (read_seqretry(&bb->lock, seq)) 8439 goto retry; 8440 8441 return rv; 8442 } 8443 EXPORT_SYMBOL_GPL(md_is_badblock); 8444 8445 /* 8446 * Add a range of bad blocks to the table. 8447 * This might extend the table, or might contract it 8448 * if two adjacent ranges can be merged. 8449 * We binary-search to find the 'insertion' point, then 8450 * decide how best to handle it. 
8451 */ 8452 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 8453 int acknowledged) 8454 { 8455 u64 *p; 8456 int lo, hi; 8457 int rv = 1; 8458 unsigned long flags; 8459 8460 if (bb->shift < 0) 8461 /* badblocks are disabled */ 8462 return 0; 8463 8464 if (bb->shift) { 8465 /* round the start down, and the end up */ 8466 sector_t next = s + sectors; 8467 s >>= bb->shift; 8468 next += (1<<bb->shift) - 1; 8469 next >>= bb->shift; 8470 sectors = next - s; 8471 } 8472 8473 write_seqlock_irqsave(&bb->lock, flags); 8474 8475 p = bb->page; 8476 lo = 0; 8477 hi = bb->count; 8478 /* Find the last range that starts at-or-before 's' */ 8479 while (hi - lo > 1) { 8480 int mid = (lo + hi) / 2; 8481 sector_t a = BB_OFFSET(p[mid]); 8482 if (a <= s) 8483 lo = mid; 8484 else 8485 hi = mid; 8486 } 8487 if (hi > lo && BB_OFFSET(p[lo]) > s) 8488 hi = lo; 8489 8490 if (hi > lo) { 8491 /* we found a range that might merge with the start 8492 * of our new range 8493 */ 8494 sector_t a = BB_OFFSET(p[lo]); 8495 sector_t e = a + BB_LEN(p[lo]); 8496 int ack = BB_ACK(p[lo]); 8497 if (e >= s) { 8498 /* Yes, we can merge with a previous range */ 8499 if (s == a && s + sectors >= e) 8500 /* new range covers old */ 8501 ack = acknowledged; 8502 else 8503 ack = ack && acknowledged; 8504 8505 if (e < s + sectors) 8506 e = s + sectors; 8507 if (e - a <= BB_MAX_LEN) { 8508 p[lo] = BB_MAKE(a, e-a, ack); 8509 s = e; 8510 } else { 8511 /* does not all fit in one range, 8512 * make p[lo] maximal 8513 */ 8514 if (BB_LEN(p[lo]) != BB_MAX_LEN) 8515 p[lo] = BB_MAKE(a, BB_MAX_LEN, ack); 8516 s = a + BB_MAX_LEN; 8517 } 8518 sectors = e - s; 8519 } 8520 } 8521 if (sectors && hi < bb->count) { 8522 /* 'hi' points to the first range that starts after 's'. 8523 * Maybe we can merge with the start of that range */ 8524 sector_t a = BB_OFFSET(p[hi]); 8525 sector_t e = a + BB_LEN(p[hi]); 8526 int ack = BB_ACK(p[hi]); 8527 if (a <= s + sectors) { 8528 /* merging is possible */ 8529 if (e <= s + sectors) { 8530 /* full overlap */ 8531 e = s + sectors; 8532 ack = acknowledged; 8533 } else 8534 ack = ack && acknowledged; 8535 8536 a = s; 8537 if (e - a <= BB_MAX_LEN) { 8538 p[hi] = BB_MAKE(a, e-a, ack); 8539 s = e; 8540 } else { 8541 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack); 8542 s = a + BB_MAX_LEN; 8543 } 8544 sectors = e - s; 8545 lo = hi; 8546 hi++; 8547 } 8548 } 8549 if (sectors == 0 && hi < bb->count) { 8550 /* we might be able to combine lo and hi */ 8551 /* Note: 's' is at the end of 'lo' */ 8552 sector_t a = BB_OFFSET(p[hi]); 8553 int lolen = BB_LEN(p[lo]); 8554 int hilen = BB_LEN(p[hi]); 8555 int newlen = lolen + hilen - (s - a); 8556 if (s >= a && newlen < BB_MAX_LEN) { 8557 /* yes, we can combine them */ 8558 int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]); 8559 p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack); 8560 memmove(p + hi, p + hi + 1, 8561 (bb->count - hi - 1) * 8); 8562 bb->count--; 8563 } 8564 } 8565 while (sectors) { 8566 /* didn't merge (it all). 
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi'
		 */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;
			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}

int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = md_set_badblocks(&rdev->badblocks,
			      s, sectors, 0);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = -ENOSPC;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so.. */
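				/* Worked example (illustrative): clearing
				 * sectors 50..65 from a recorded range
				 * covering 48..71 leaves two survivors:
				 *
				 *	p[lo]   = BB_MAKE(48, 2, ack);	// 48..49
				 *	p[lo+1] = BB_MAKE(66, 6, ack);	// 66..71
				 */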
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	return md_clear_badblocks(&rdev->badblocks,
				  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to the bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 * are recorded as bad.  The list is truncated to fit within
 * the one-page limit of sysfs.
 * Writing "sector length" to this file adds an acknowledged
 * bad-block range to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 * been acknowledged.  Writing to this file adds bad blocks
 * without acknowledging them.  This is largely for testing.
 */

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}

#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock.
	 */
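	/* Hypothetical session (values invented): writing "12345 8" to
	 * the bad-blocks file records an acknowledged 8-sector range at
	 * sector 12345; with DO_DEBUG, a leading '-' ("-12345 8")
	 * clears it again via md_clear_badblocks().
	 */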
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots.  While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

void md_reload_sb(struct mddev *mddev)
{
	struct md_rdev *rdev, *tmp;

	rdev_for_each_safe(rdev, tmp, mddev) {
		rdev->sb_loaded = 0;
		ClearPageUptodate(rdev->sb_page);
	}
	mddev->raid_disks = 0;
	analyze_sbs(mddev);
	rdev_for_each_safe(rdev, tmp, mddev) {
		struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
		/* since we don't write to faulty devices, we figure out if the
		 * disk is faulty by comparing events
		 */
		if (mddev->events > sb->events)
			set_bit(Faulty, &rdev->flags);
	}
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
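/* Devices arrive on the list below via md_autodetect_dev(), called by
 * the partition-scanning code, e.g. for MS-DOS partitions of type 0xfd
 * (Linux raid autodetect).  An invented example:
 *
 *	md_autodetect_dev(MKDEV(8, 1));		// queue /dev/sda1
 *
 * autostart_arrays() later imports everything queued and runs
 * autorun_devices() on the result.
 */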
8953 */ 8954 8955 static LIST_HEAD(all_detected_devices); 8956 struct detected_devices_node { 8957 struct list_head list; 8958 dev_t dev; 8959 }; 8960 8961 void md_autodetect_dev(dev_t dev) 8962 { 8963 struct detected_devices_node *node_detected_dev; 8964 8965 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 8966 if (node_detected_dev) { 8967 node_detected_dev->dev = dev; 8968 list_add_tail(&node_detected_dev->list, &all_detected_devices); 8969 } else { 8970 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" 8971 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); 8972 } 8973 } 8974 8975 static void autostart_arrays(int part) 8976 { 8977 struct md_rdev *rdev; 8978 struct detected_devices_node *node_detected_dev; 8979 dev_t dev; 8980 int i_scanned, i_passed; 8981 8982 i_scanned = 0; 8983 i_passed = 0; 8984 8985 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 8986 8987 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 8988 i_scanned++; 8989 node_detected_dev = list_entry(all_detected_devices.next, 8990 struct detected_devices_node, list); 8991 list_del(&node_detected_dev->list); 8992 dev = node_detected_dev->dev; 8993 kfree(node_detected_dev); 8994 rdev = md_import_device(dev,0, 90); 8995 if (IS_ERR(rdev)) 8996 continue; 8997 8998 if (test_bit(Faulty, &rdev->flags)) 8999 continue; 9000 9001 set_bit(AutoDetected, &rdev->flags); 9002 list_add(&rdev->same_set, &pending_raid_disks); 9003 i_passed++; 9004 } 9005 9006 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", 9007 i_scanned, i_passed); 9008 9009 autorun_devices(part); 9010 } 9011 9012 #endif /* !MODULE */ 9013 9014 static __exit void md_exit(void) 9015 { 9016 struct mddev *mddev; 9017 struct list_head *tmp; 9018 int delay = 1; 9019 9020 blk_unregister_region(MKDEV(MD_MAJOR,0), 512); 9021 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 9022 9023 unregister_blkdev(MD_MAJOR,"md"); 9024 unregister_blkdev(mdp_major, "mdp"); 9025 unregister_reboot_notifier(&md_notifier); 9026 unregister_sysctl_table(raid_table_header); 9027 9028 /* We cannot unload the modules while some process is 9029 * waiting for us in select() or poll() - wake them up 9030 */ 9031 md_unloading = 1; 9032 while (waitqueue_active(&md_event_waiters)) { 9033 /* not safe to leave yet */ 9034 wake_up(&md_event_waiters); 9035 msleep(delay); 9036 delay += delay; 9037 } 9038 remove_proc_entry("mdstat", NULL); 9039 9040 for_each_mddev(mddev, tmp) { 9041 export_array(mddev); 9042 mddev->hold_active = 0; 9043 } 9044 destroy_workqueue(md_misc_wq); 9045 destroy_workqueue(md_wq); 9046 } 9047 9048 subsys_initcall(md_init); 9049 module_exit(md_exit) 9050 9051 static int get_ro(char *buffer, struct kernel_param *kp) 9052 { 9053 return sprintf(buffer, "%d", start_readonly); 9054 } 9055 static int set_ro(const char *val, struct kernel_param *kp) 9056 { 9057 return kstrtouint(val, 10, (unsigned int *)&start_readonly); 9058 } 9059 9060 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 9061 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 9062 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 9063 9064 MODULE_LICENSE("GPL"); 9065 MODULE_DESCRIPTION("MD RAID framework"); 9066 MODULE_ALIAS("md"); 9067 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 9068