/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
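/*
 * Illustrative example (values are arbitrary, an array named md0 is
 * assumed): the global defaults below can be tuned at run time with
 *
 *	echo  50000 > /proc/sys/dev/raid/speed_limit_min
 *	echo 500000 > /proc/sys/dev/raid/speed_limit_max
 *
 * and a single array can override them via its own sysfs knobs, e.g.
 *
 *	echo 100000 > /sys/block/md0/md/sync_speed_min
 *
 * A per-array value of 0 means "use the system-wide default", as
 * speed_min()/speed_max() below show.
 */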
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
	if (!b)
		return NULL;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    struct mddev *mddev)
{
	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
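/*
 * Typical use of the macro above (sketch; _tmp is a plain
 * struct list_head * cursor):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		... use mddev; the macro holds a reference on it and
 *		... drops it (mddev_put) when advancing to the next entry
 *	}
 */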
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;
	int cpu;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
		return;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
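/*
 * The usual pairing (sketch): callers bracket a reconfiguration with
 *
 *	mddev_suspend(mddev);
 *	... change personality/layout state while no IO is in flight ...
 *	mddev_resume(mddev);
 *
 * so every bio either completed before the suspend or waits in
 * md_make_request() until the resume.
 */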
int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

static int md_mergeable_bvec(struct request_queue *q,
			     struct bvec_merge_data *bvm,
			     struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	int ret;
	rcu_read_lock();
	if (mddev->suspended) {
		/* Must always allow one vec */
		if (bvm->bi_size == 0)
			ret = biovec->bv_len;
		else
			ret = 0;
	} else {
		struct md_personality *pers = mddev->pers;
		if (pers && pers->mergeable_bvec)
			ret = pers->mergeable_bvec(mddev, bvm, biovec);
		else
			ret = biovec->bv_len;
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_iter.bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		mddev->pers->make_request(mddev, bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}
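/*
 * Flow of a REQ_FLUSH bio through the helpers above (descriptive note):
 * md_flush_request() parks the bio in mddev->flush_bio and schedules
 * submit_flushes(), which sends an empty WRITE_FLUSH to every active
 * rdev; once the last md_end_flush() completes, md_submit_flush_data()
 * either ends an empty barrier or strips REQ_FLUSH and passes the data
 * portion down to the personality.
 */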
void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->lock);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);

void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct mddev *mddev = cb->data;
	md_wakeup_thread(mddev->thread);
	kfree(cb);
}
EXPORT_SYMBOL(md_unplug);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
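/*
 * Descriptive note on mddev_find() below: it follows a classic
 * lock / search / unlock / allocate / retry pattern.  Under
 * all_mddevs_lock it looks the unit up (or, with unit == 0, picks a
 * free minor starting at 512); if nothing is found and no candidate
 * has been allocated yet, it drops the lock, kzalloc()s a new mddev
 * and jumps back to "retry" so the list is re-checked before insertion.
 */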
static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
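/*
 * Note (describing the md_p.h helper as understood, not defined here):
 * MD_NEW_SIZE_SECTORS() rounds the device size down to a 64KB boundary
 * and steps back one 64KB chunk, so the 0.90 superblock occupies the
 * last fully-aligned 64KB block of the device - which is why
 * super_90_load() can say the superblock "is at the end of the disk"
 * and "happens to be a multiple of 4Kb".
 */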
static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	kfree(rdev->badblocks.page);
	rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	int ret;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	submit_bio_wait(rw, bio);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
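/*
 * Worked example of the fold above (an arithmetic check, not from the
 * original source): md_csum_fold(0x12345678) adds 0x5678 + 0x1234 =
 * 0x68ac, and the second pass adds a carry of zero, so the result is
 * 0x68ac.  Folding twice guarantees the value fits in 16 bits even when
 * the first addition itself carries into bit 16.
 */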
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */
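/*
 * Descriptive note: the on-disk v1 superblock checksummed below is the
 * 256-byte fixed header plus one 16-bit role entry per possible device
 * (max_dev), summed as little-endian 32-bit words with sb_csum treated
 * as zero; a trailing odd 16-bit word, if any, is folded in separately.
 */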
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
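	/*
	 * Descriptive note on the block that follows: each on-disk bad
	 * block entry is a little-endian u64 packing the start sector in
	 * the upper 54 bits and the length (up to 0x3ff sectors) in the
	 * low 10 bits, both scaled by bblog_shift; an all-ones entry
	 * terminates the list.
	 */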
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
1635 */ 1636 if (ev1 < mddev->bitmap->events_cleared) 1637 return 0; 1638 if (ev1 < mddev->events) 1639 set_bit(Bitmap_sync, &rdev->flags); 1640 } else { 1641 if (ev1 < mddev->events) 1642 /* just a hot-add of a new device, leave raid_disk at -1 */ 1643 return 0; 1644 } 1645 if (mddev->level != LEVEL_MULTIPATH) { 1646 int role; 1647 if (rdev->desc_nr < 0 || 1648 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { 1649 role = 0xffff; 1650 rdev->desc_nr = -1; 1651 } else 1652 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1653 switch(role) { 1654 case 0xffff: /* spare */ 1655 break; 1656 case 0xfffe: /* faulty */ 1657 set_bit(Faulty, &rdev->flags); 1658 break; 1659 default: 1660 rdev->saved_raid_disk = role; 1661 if ((le32_to_cpu(sb->feature_map) & 1662 MD_FEATURE_RECOVERY_OFFSET)) { 1663 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 1664 if (!(le32_to_cpu(sb->feature_map) & 1665 MD_FEATURE_RECOVERY_BITMAP)) 1666 rdev->saved_raid_disk = -1; 1667 } else 1668 set_bit(In_sync, &rdev->flags); 1669 rdev->raid_disk = role; 1670 break; 1671 } 1672 if (sb->devflags & WriteMostly1) 1673 set_bit(WriteMostly, &rdev->flags); 1674 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) 1675 set_bit(Replacement, &rdev->flags); 1676 } else /* MULTIPATH are always insync */ 1677 set_bit(In_sync, &rdev->flags); 1678 1679 return 0; 1680 } 1681 1682 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 1683 { 1684 struct mdp_superblock_1 *sb; 1685 struct md_rdev *rdev2; 1686 int max_dev, i; 1687 /* make rdev->sb match mddev and rdev data. */ 1688 1689 sb = page_address(rdev->sb_page); 1690 1691 sb->feature_map = 0; 1692 sb->pad0 = 0; 1693 sb->recovery_offset = cpu_to_le64(0); 1694 memset(sb->pad3, 0, sizeof(sb->pad3)); 1695 1696 sb->utime = cpu_to_le64((__u64)mddev->utime); 1697 sb->events = cpu_to_le64(mddev->events); 1698 if (mddev->in_sync) 1699 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1700 else 1701 sb->resync_offset = cpu_to_le64(0); 1702 1703 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1704 1705 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1706 sb->size = cpu_to_le64(mddev->dev_sectors); 1707 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1708 sb->level = cpu_to_le32(mddev->level); 1709 sb->layout = cpu_to_le32(mddev->layout); 1710 1711 if (test_bit(WriteMostly, &rdev->flags)) 1712 sb->devflags |= WriteMostly1; 1713 else 1714 sb->devflags &= ~WriteMostly1; 1715 sb->data_offset = cpu_to_le64(rdev->data_offset); 1716 sb->data_size = cpu_to_le64(rdev->sectors); 1717 1718 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1719 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1720 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1721 } 1722 1723 if (rdev->raid_disk >= 0 && 1724 !test_bit(In_sync, &rdev->flags)) { 1725 sb->feature_map |= 1726 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1727 sb->recovery_offset = 1728 cpu_to_le64(rdev->recovery_offset); 1729 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 1730 sb->feature_map |= 1731 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 1732 } 1733 if (test_bit(Replacement, &rdev->flags)) 1734 sb->feature_map |= 1735 cpu_to_le32(MD_FEATURE_REPLACEMENT); 1736 1737 if (mddev->reshape_position != MaxSector) { 1738 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1739 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1740 sb->new_layout = cpu_to_le32(mddev->new_layout); 1741 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1742 sb->new_level = 
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;

}

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
		.allow_new_offset   = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
		.allow_new_offset   = super_1_allow_new_offset,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}
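/*
 * Added note: match_mddev_units() below returns 1 if the two arrays
 * share at least one underlying physical device (the same whole-disk
 * bd_contains), which is why the comparison walks every rdev pair
 * under RCU.
 */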
1952 */ 1953 int md_integrity_register(struct mddev *mddev) 1954 { 1955 struct md_rdev *rdev, *reference = NULL; 1956 1957 if (list_empty(&mddev->disks)) 1958 return 0; /* nothing to do */ 1959 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 1960 return 0; /* shouldn't register, or already is */ 1961 rdev_for_each(rdev, mddev) { 1962 /* skip spares and non-functional disks */ 1963 if (test_bit(Faulty, &rdev->flags)) 1964 continue; 1965 if (rdev->raid_disk < 0) 1966 continue; 1967 if (!reference) { 1968 /* Use the first rdev as the reference */ 1969 reference = rdev; 1970 continue; 1971 } 1972 /* does this rdev's profile match the reference profile? */ 1973 if (blk_integrity_compare(reference->bdev->bd_disk, 1974 rdev->bdev->bd_disk) < 0) 1975 return -EINVAL; 1976 } 1977 if (!reference || !bdev_get_integrity(reference->bdev)) 1978 return 0; 1979 /* 1980 * All component devices are integrity capable and have matching 1981 * profiles, register the common profile for the md device. 1982 */ 1983 if (blk_integrity_register(mddev->gendisk, 1984 bdev_get_integrity(reference->bdev)) != 0) { 1985 printk(KERN_ERR "md: failed to register integrity for %s\n", 1986 mdname(mddev)); 1987 return -EINVAL; 1988 } 1989 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); 1990 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) { 1991 printk(KERN_ERR "md: failed to create integrity pool for %s\n", 1992 mdname(mddev)); 1993 return -EINVAL; 1994 } 1995 return 0; 1996 } 1997 EXPORT_SYMBOL(md_integrity_register); 1998 1999 /* Disable data integrity if non-capable/non-matching disk is being added */ 2000 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2001 { 2002 struct blk_integrity *bi_rdev; 2003 struct blk_integrity *bi_mddev; 2004 2005 if (!mddev->gendisk) 2006 return; 2007 2008 bi_rdev = bdev_get_integrity(rdev->bdev); 2009 bi_mddev = blk_get_integrity(mddev->gendisk); 2010 2011 if (!bi_mddev) /* nothing to do */ 2012 return; 2013 if (rdev->raid_disk < 0) /* skip spares */ 2014 return; 2015 if (bi_rdev && blk_integrity_compare(mddev->gendisk, 2016 rdev->bdev->bd_disk) >= 0) 2017 return; 2018 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); 2019 blk_integrity_unregister(mddev->gendisk); 2020 } 2021 EXPORT_SYMBOL(md_integrity_add_rdev); 2022 2023 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2024 { 2025 char b[BDEVNAME_SIZE]; 2026 struct kobject *ko; 2027 char *s; 2028 int err; 2029 2030 /* prevent duplicates */ 2031 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2032 return -EEXIST; 2033 2034 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2035 if (rdev->sectors && (mddev->dev_sectors == 0 || 2036 rdev->sectors < mddev->dev_sectors)) { 2037 if (mddev->pers) { 2038 /* Cannot change size, so fail 2039 * If mddev->level <= 0, then we don't care 2040 * about aligning sizes (e.g. linear) 2041 */ 2042 if (mddev->level > 0) 2043 return -ENOSPC; 2044 } else 2045 mddev->dev_sectors = rdev->sectors; 2046 } 2047 2048 /* Verify rdev->desc_nr is unique. 
2049 * If it is -1, assign a free number, else 2050 * check number is not in use 2051 */ 2052 rcu_read_lock(); 2053 if (rdev->desc_nr < 0) { 2054 int choice = 0; 2055 if (mddev->pers) 2056 choice = mddev->raid_disks; 2057 while (md_find_rdev_nr_rcu(mddev, choice)) 2058 choice++; 2059 rdev->desc_nr = choice; 2060 } else { 2061 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2062 rcu_read_unlock(); 2063 return -EBUSY; 2064 } 2065 } 2066 rcu_read_unlock(); 2067 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2068 printk(KERN_WARNING "md: %s: array is limited to %d devices\n", 2069 mdname(mddev), mddev->max_disks); 2070 return -EBUSY; 2071 } 2072 bdevname(rdev->bdev,b); 2073 while ( (s=strchr(b, '/')) != NULL) 2074 *s = '!'; 2075 2076 rdev->mddev = mddev; 2077 printk(KERN_INFO "md: bind<%s>\n", b); 2078 2079 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2080 goto fail; 2081 2082 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2083 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2084 /* failure here is OK */; 2085 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2086 2087 list_add_rcu(&rdev->same_set, &mddev->disks); 2088 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2089 2090 /* May as well allow recovery to be retried once */ 2091 mddev->recovery_disabled++; 2092 2093 return 0; 2094 2095 fail: 2096 printk(KERN_WARNING "md: failed to register dev-%s for %s\n", 2097 b, mdname(mddev)); 2098 return err; 2099 } 2100 2101 static void md_delayed_delete(struct work_struct *ws) 2102 { 2103 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2104 kobject_del(&rdev->kobj); 2105 kobject_put(&rdev->kobj); 2106 } 2107 2108 static void unbind_rdev_from_array(struct md_rdev *rdev) 2109 { 2110 char b[BDEVNAME_SIZE]; 2111 2112 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2113 list_del_rcu(&rdev->same_set); 2114 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2115 rdev->mddev = NULL; 2116 sysfs_remove_link(&rdev->kobj, "block"); 2117 sysfs_put(rdev->sysfs_state); 2118 rdev->sysfs_state = NULL; 2119 rdev->badblocks.count = 0; 2120 /* We need to delay this, otherwise we can deadlock when 2121 * writing to 'remove' to "dev/state". We also need 2122 * to delay it due to rcu usage. 2123 */ 2124 synchronize_rcu(); 2125 INIT_WORK(&rdev->del_work, md_delayed_delete); 2126 kobject_get(&rdev->kobj); 2127 queue_work(md_misc_wq, &rdev->del_work); 2128 } 2129 2130 /* 2131 * prevent the device from being mounted, repartitioned or 2132 * otherwise reused by a RAID array (or any other kernel 2133 * subsystem), by bd_claiming the device. 2134 */ 2135 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2136 { 2137 int err = 0; 2138 struct block_device *bdev; 2139 char b[BDEVNAME_SIZE]; 2140 2141 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2142 shared ? 
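                                 /* The third argument is the holder cookie for the
                                  * exclusive claim: in "shared" mode every caller
                                  * passes the same cookie (the address of lock_rdev),
                                  * which lets several arrays inspect one device at
                                  * the same time; otherwise the rdev itself is the
                                  * holder and the claim stays private to this member.
                                  */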
(struct md_rdev *)lock_rdev : rdev); 2143 if (IS_ERR(bdev)) { 2144 printk(KERN_ERR "md: could not open %s.\n", 2145 __bdevname(dev, b)); 2146 return PTR_ERR(bdev); 2147 } 2148 rdev->bdev = bdev; 2149 return err; 2150 } 2151 2152 static void unlock_rdev(struct md_rdev *rdev) 2153 { 2154 struct block_device *bdev = rdev->bdev; 2155 rdev->bdev = NULL; 2156 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2157 } 2158 2159 void md_autodetect_dev(dev_t dev); 2160 2161 static void export_rdev(struct md_rdev *rdev) 2162 { 2163 char b[BDEVNAME_SIZE]; 2164 2165 printk(KERN_INFO "md: export_rdev(%s)\n", 2166 bdevname(rdev->bdev,b)); 2167 md_rdev_clear(rdev); 2168 #ifndef MODULE 2169 if (test_bit(AutoDetected, &rdev->flags)) 2170 md_autodetect_dev(rdev->bdev->bd_dev); 2171 #endif 2172 unlock_rdev(rdev); 2173 kobject_put(&rdev->kobj); 2174 } 2175 2176 void md_kick_rdev_from_array(struct md_rdev *rdev) 2177 { 2178 unbind_rdev_from_array(rdev); 2179 export_rdev(rdev); 2180 } 2181 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2182 2183 static void export_array(struct mddev *mddev) 2184 { 2185 struct md_rdev *rdev; 2186 2187 while (!list_empty(&mddev->disks)) { 2188 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2189 same_set); 2190 md_kick_rdev_from_array(rdev); 2191 } 2192 mddev->raid_disks = 0; 2193 mddev->major_version = 0; 2194 } 2195 2196 static void sync_sbs(struct mddev *mddev, int nospares) 2197 { 2198 /* Update each superblock (in-memory image), but 2199 * if we are allowed to, skip spares which already 2200 * have the right event counter, or have one earlier 2201 * (which would mean they aren't being marked as dirty 2202 * with the rest of the array) 2203 */ 2204 struct md_rdev *rdev; 2205 rdev_for_each(rdev, mddev) { 2206 if (rdev->sb_events == mddev->events || 2207 (nospares && 2208 rdev->raid_disk < 0 && 2209 rdev->sb_events+1 == mddev->events)) { 2210 /* Don't update this superblock */ 2211 rdev->sb_loaded = 2; 2212 } else { 2213 sync_super(mddev, rdev); 2214 rdev->sb_loaded = 1; 2215 } 2216 } 2217 } 2218 2219 void md_update_sb(struct mddev *mddev, int force_change) 2220 { 2221 struct md_rdev *rdev; 2222 int sync_req; 2223 int nospares = 0; 2224 int any_badblocks_changed = 0; 2225 2226 if (mddev->ro) { 2227 if (force_change) 2228 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2229 return; 2230 } 2231 repeat: 2232 /* First make sure individual recovery_offsets are correct */ 2233 rdev_for_each(rdev, mddev) { 2234 if (rdev->raid_disk >= 0 && 2235 mddev->delta_disks >= 0 && 2236 !test_bit(In_sync, &rdev->flags) && 2237 mddev->curr_resync_completed > rdev->recovery_offset) 2238 rdev->recovery_offset = mddev->curr_resync_completed; 2239 2240 } 2241 if (!mddev->persistent) { 2242 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2243 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2244 if (!mddev->external) { 2245 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2246 rdev_for_each(rdev, mddev) { 2247 if (rdev->badblocks.changed) { 2248 rdev->badblocks.changed = 0; 2249 md_ack_all_badblocks(&rdev->badblocks); 2250 md_error(mddev, rdev); 2251 } 2252 clear_bit(Blocked, &rdev->flags); 2253 clear_bit(BlockedBadBlocks, &rdev->flags); 2254 wake_up(&rdev->blocked_wait); 2255 } 2256 } 2257 wake_up(&mddev->sb_wait); 2258 return; 2259 } 2260 2261 spin_lock(&mddev->lock); 2262 2263 mddev->utime = get_seconds(); 2264 2265 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2266 force_change = 1; 2267 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2268 /* just a clean<-> dirty transition, possibly leave spares 
alone, 2269 * though if events isn't the right even/odd, we will have to do 2270 * spares after all 2271 */ 2272 nospares = 1; 2273 if (force_change) 2274 nospares = 0; 2275 if (mddev->degraded) 2276 /* If the array is degraded, then skipping spares is both 2277 * dangerous and fairly pointless. 2278 * Dangerous because a device that was removed from the array 2279 * might have a event_count that still looks up-to-date, 2280 * so it can be re-added without a resync. 2281 * Pointless because if there are any spares to skip, 2282 * then a recovery will happen and soon that array won't 2283 * be degraded any more and the spare can go back to sleep then. 2284 */ 2285 nospares = 0; 2286 2287 sync_req = mddev->in_sync; 2288 2289 /* If this is just a dirty<->clean transition, and the array is clean 2290 * and 'events' is odd, we can roll back to the previous clean state */ 2291 if (nospares 2292 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2293 && mddev->can_decrease_events 2294 && mddev->events != 1) { 2295 mddev->events--; 2296 mddev->can_decrease_events = 0; 2297 } else { 2298 /* otherwise we have to go forward and ... */ 2299 mddev->events ++; 2300 mddev->can_decrease_events = nospares; 2301 } 2302 2303 /* 2304 * This 64-bit counter should never wrap. 2305 * Either we are in around ~1 trillion A.C., assuming 2306 * 1 reboot per second, or we have a bug... 2307 */ 2308 WARN_ON(mddev->events == 0); 2309 2310 rdev_for_each(rdev, mddev) { 2311 if (rdev->badblocks.changed) 2312 any_badblocks_changed++; 2313 if (test_bit(Faulty, &rdev->flags)) 2314 set_bit(FaultRecorded, &rdev->flags); 2315 } 2316 2317 sync_sbs(mddev, nospares); 2318 spin_unlock(&mddev->lock); 2319 2320 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2321 mdname(mddev), mddev->in_sync); 2322 2323 bitmap_update_sb(mddev->bitmap); 2324 rdev_for_each(rdev, mddev) { 2325 char b[BDEVNAME_SIZE]; 2326 2327 if (rdev->sb_loaded != 1) 2328 continue; /* no noise on spare devices */ 2329 2330 if (!test_bit(Faulty, &rdev->flags)) { 2331 md_super_write(mddev,rdev, 2332 rdev->sb_start, rdev->sb_size, 2333 rdev->sb_page); 2334 pr_debug("md: (write) %s's sb offset: %llu\n", 2335 bdevname(rdev->bdev, b), 2336 (unsigned long long)rdev->sb_start); 2337 rdev->sb_events = mddev->events; 2338 if (rdev->badblocks.size) { 2339 md_super_write(mddev, rdev, 2340 rdev->badblocks.sector, 2341 rdev->badblocks.size << 9, 2342 rdev->bb_page); 2343 rdev->badblocks.size = 0; 2344 } 2345 2346 } else 2347 pr_debug("md: %s (skipping faulty)\n", 2348 bdevname(rdev->bdev, b)); 2349 2350 if (mddev->level == LEVEL_MULTIPATH) 2351 /* only need to write one superblock... 
*/ 2352 break; 2353 } 2354 md_super_wait(mddev); 2355 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2356 2357 spin_lock(&mddev->lock); 2358 if (mddev->in_sync != sync_req || 2359 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2360 /* have to write it out again */ 2361 spin_unlock(&mddev->lock); 2362 goto repeat; 2363 } 2364 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2365 spin_unlock(&mddev->lock); 2366 wake_up(&mddev->sb_wait); 2367 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2368 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2369 2370 rdev_for_each(rdev, mddev) { 2371 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2372 clear_bit(Blocked, &rdev->flags); 2373 2374 if (any_badblocks_changed) 2375 md_ack_all_badblocks(&rdev->badblocks); 2376 clear_bit(BlockedBadBlocks, &rdev->flags); 2377 wake_up(&rdev->blocked_wait); 2378 } 2379 } 2380 EXPORT_SYMBOL(md_update_sb); 2381 2382 static int add_bound_rdev(struct md_rdev *rdev) 2383 { 2384 struct mddev *mddev = rdev->mddev; 2385 int err = 0; 2386 2387 if (!mddev->pers->hot_remove_disk) { 2388 /* If there is hot_add_disk but no hot_remove_disk 2389 * then added disks for geometry changes, 2390 * and should be added immediately. 2391 */ 2392 super_types[mddev->major_version]. 2393 validate_super(mddev, rdev); 2394 err = mddev->pers->hot_add_disk(mddev, rdev); 2395 if (err) { 2396 unbind_rdev_from_array(rdev); 2397 export_rdev(rdev); 2398 return err; 2399 } 2400 } 2401 sysfs_notify_dirent_safe(rdev->sysfs_state); 2402 2403 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2404 if (mddev->degraded) 2405 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2406 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2407 md_new_event(mddev); 2408 md_wakeup_thread(mddev->thread); 2409 return 0; 2410 } 2411 2412 /* words written to sysfs files may, or may not, be \n terminated. 2413 * We want to accept with case. For this we use cmd_match. 2414 */ 2415 static int cmd_match(const char *cmd, const char *str) 2416 { 2417 /* See if cmd, written into a sysfs file, matches 2418 * str. 
They must either be the same, or cmd can 2419 * have a trailing newline 2420 */ 2421 while (*cmd && *str && *cmd == *str) { 2422 cmd++; 2423 str++; 2424 } 2425 if (*cmd == '\n') 2426 cmd++; 2427 if (*str || *cmd) 2428 return 0; 2429 return 1; 2430 } 2431 2432 struct rdev_sysfs_entry { 2433 struct attribute attr; 2434 ssize_t (*show)(struct md_rdev *, char *); 2435 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2436 }; 2437 2438 static ssize_t 2439 state_show(struct md_rdev *rdev, char *page) 2440 { 2441 char *sep = ""; 2442 size_t len = 0; 2443 unsigned long flags = ACCESS_ONCE(rdev->flags); 2444 2445 if (test_bit(Faulty, &flags) || 2446 rdev->badblocks.unacked_exist) { 2447 len+= sprintf(page+len, "%sfaulty",sep); 2448 sep = ","; 2449 } 2450 if (test_bit(In_sync, &flags)) { 2451 len += sprintf(page+len, "%sin_sync",sep); 2452 sep = ","; 2453 } 2454 if (test_bit(WriteMostly, &flags)) { 2455 len += sprintf(page+len, "%swrite_mostly",sep); 2456 sep = ","; 2457 } 2458 if (test_bit(Blocked, &flags) || 2459 (rdev->badblocks.unacked_exist 2460 && !test_bit(Faulty, &flags))) { 2461 len += sprintf(page+len, "%sblocked", sep); 2462 sep = ","; 2463 } 2464 if (!test_bit(Faulty, &flags) && 2465 !test_bit(In_sync, &flags)) { 2466 len += sprintf(page+len, "%sspare", sep); 2467 sep = ","; 2468 } 2469 if (test_bit(WriteErrorSeen, &flags)) { 2470 len += sprintf(page+len, "%swrite_error", sep); 2471 sep = ","; 2472 } 2473 if (test_bit(WantReplacement, &flags)) { 2474 len += sprintf(page+len, "%swant_replacement", sep); 2475 sep = ","; 2476 } 2477 if (test_bit(Replacement, &flags)) { 2478 len += sprintf(page+len, "%sreplacement", sep); 2479 sep = ","; 2480 } 2481 2482 return len+sprintf(page+len, "\n"); 2483 } 2484 2485 static ssize_t 2486 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2487 { 2488 /* can write 2489 * faulty - simulates an error 2490 * remove - disconnects the device 2491 * writemostly - sets write_mostly 2492 * -writemostly - clears write_mostly 2493 * blocked - sets the Blocked flags 2494 * -blocked - clears the Blocked and possibly simulates an error 2495 * insync - sets Insync providing device isn't active 2496 * -insync - clear Insync for a device with a slot assigned, 2497 * so that it gets rebuilt based on bitmap 2498 * write_error - sets WriteErrorSeen 2499 * -write_error - clears WriteErrorSeen 2500 */ 2501 int err = -EINVAL; 2502 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2503 md_error(rdev->mddev, rdev); 2504 if (test_bit(Faulty, &rdev->flags)) 2505 err = 0; 2506 else 2507 err = -EBUSY; 2508 } else if (cmd_match(buf, "remove")) { 2509 if (rdev->raid_disk >= 0) 2510 err = -EBUSY; 2511 else { 2512 struct mddev *mddev = rdev->mddev; 2513 if (mddev_is_clustered(mddev)) 2514 md_cluster_ops->remove_disk(mddev, rdev); 2515 md_kick_rdev_from_array(rdev); 2516 if (mddev_is_clustered(mddev)) 2517 md_cluster_ops->metadata_update_start(mddev); 2518 if (mddev->pers) 2519 md_update_sb(mddev, 1); 2520 md_new_event(mddev); 2521 if (mddev_is_clustered(mddev)) 2522 md_cluster_ops->metadata_update_finish(mddev); 2523 err = 0; 2524 } 2525 } else if (cmd_match(buf, "writemostly")) { 2526 set_bit(WriteMostly, &rdev->flags); 2527 err = 0; 2528 } else if (cmd_match(buf, "-writemostly")) { 2529 clear_bit(WriteMostly, &rdev->flags); 2530 err = 0; 2531 } else if (cmd_match(buf, "blocked")) { 2532 set_bit(Blocked, &rdev->flags); 2533 err = 0; 2534 } else if (cmd_match(buf, "-blocked")) { 2535 if (!test_bit(Faulty, &rdev->flags) && 2536 rdev->badblocks.unacked_exist) { 2537 
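                        /* Typical trigger for this branch, via the per-rdev
                         * "state" attribute (path shown for illustration only):
                         *   echo -blocked > /sys/block/md0/md/dev-sdb1/state
                         */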
/* metadata handler doesn't understand badblocks, 2538 * so we need to fail the device 2539 */ 2540 md_error(rdev->mddev, rdev); 2541 } 2542 clear_bit(Blocked, &rdev->flags); 2543 clear_bit(BlockedBadBlocks, &rdev->flags); 2544 wake_up(&rdev->blocked_wait); 2545 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2546 md_wakeup_thread(rdev->mddev->thread); 2547 2548 err = 0; 2549 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2550 set_bit(In_sync, &rdev->flags); 2551 err = 0; 2552 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) { 2553 if (rdev->mddev->pers == NULL) { 2554 clear_bit(In_sync, &rdev->flags); 2555 rdev->saved_raid_disk = rdev->raid_disk; 2556 rdev->raid_disk = -1; 2557 err = 0; 2558 } 2559 } else if (cmd_match(buf, "write_error")) { 2560 set_bit(WriteErrorSeen, &rdev->flags); 2561 err = 0; 2562 } else if (cmd_match(buf, "-write_error")) { 2563 clear_bit(WriteErrorSeen, &rdev->flags); 2564 err = 0; 2565 } else if (cmd_match(buf, "want_replacement")) { 2566 /* Any non-spare device that is not a replacement can 2567 * become want_replacement at any time, but we then need to 2568 * check if recovery is needed. 2569 */ 2570 if (rdev->raid_disk >= 0 && 2571 !test_bit(Replacement, &rdev->flags)) 2572 set_bit(WantReplacement, &rdev->flags); 2573 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2574 md_wakeup_thread(rdev->mddev->thread); 2575 err = 0; 2576 } else if (cmd_match(buf, "-want_replacement")) { 2577 /* Clearing 'want_replacement' is always allowed. 2578 * Once replacements starts it is too late though. 2579 */ 2580 err = 0; 2581 clear_bit(WantReplacement, &rdev->flags); 2582 } else if (cmd_match(buf, "replacement")) { 2583 /* Can only set a device as a replacement when array has not 2584 * yet been started. Once running, replacement is automatic 2585 * from spares, or by assigning 'slot'. 2586 */ 2587 if (rdev->mddev->pers) 2588 err = -EBUSY; 2589 else { 2590 set_bit(Replacement, &rdev->flags); 2591 err = 0; 2592 } 2593 } else if (cmd_match(buf, "-replacement")) { 2594 /* Similarly, can only clear Replacement before start */ 2595 if (rdev->mddev->pers) 2596 err = -EBUSY; 2597 else { 2598 clear_bit(Replacement, &rdev->flags); 2599 err = 0; 2600 } 2601 } else if (cmd_match(buf, "re-add")) { 2602 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { 2603 /* clear_bit is performed _after_ all the devices 2604 * have their local Faulty bit cleared. If any writes 2605 * happen in the meantime in the local node, they 2606 * will land in the local bitmap, which will be synced 2607 * by this node eventually 2608 */ 2609 if (!mddev_is_clustered(rdev->mddev) || 2610 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 2611 clear_bit(Faulty, &rdev->flags); 2612 err = add_bound_rdev(rdev); 2613 } 2614 } else 2615 err = -EBUSY; 2616 } 2617 if (!err) 2618 sysfs_notify_dirent_safe(rdev->sysfs_state); 2619 return err ? 
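               /* sysfs store convention: on success return len so the
                * whole write is reported as consumed */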
err : len; 2620 } 2621 static struct rdev_sysfs_entry rdev_state = 2622 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 2623 2624 static ssize_t 2625 errors_show(struct md_rdev *rdev, char *page) 2626 { 2627 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2628 } 2629 2630 static ssize_t 2631 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 2632 { 2633 char *e; 2634 unsigned long n = simple_strtoul(buf, &e, 10); 2635 if (*buf && (*e == 0 || *e == '\n')) { 2636 atomic_set(&rdev->corrected_errors, n); 2637 return len; 2638 } 2639 return -EINVAL; 2640 } 2641 static struct rdev_sysfs_entry rdev_errors = 2642 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2643 2644 static ssize_t 2645 slot_show(struct md_rdev *rdev, char *page) 2646 { 2647 if (rdev->raid_disk < 0) 2648 return sprintf(page, "none\n"); 2649 else 2650 return sprintf(page, "%d\n", rdev->raid_disk); 2651 } 2652 2653 static ssize_t 2654 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 2655 { 2656 char *e; 2657 int err; 2658 int slot = simple_strtoul(buf, &e, 10); 2659 if (strncmp(buf, "none", 4)==0) 2660 slot = -1; 2661 else if (e==buf || (*e && *e!= '\n')) 2662 return -EINVAL; 2663 if (rdev->mddev->pers && slot == -1) { 2664 /* Setting 'slot' on an active array requires also 2665 * updating the 'rd%d' link, and communicating 2666 * with the personality with ->hot_*_disk. 2667 * For now we only support removing 2668 * failed/spare devices. This normally happens automatically, 2669 * but not when the metadata is externally managed. 2670 */ 2671 if (rdev->raid_disk == -1) 2672 return -EEXIST; 2673 /* personality does all needed checks */ 2674 if (rdev->mddev->pers->hot_remove_disk == NULL) 2675 return -EINVAL; 2676 clear_bit(Blocked, &rdev->flags); 2677 remove_and_add_spares(rdev->mddev, rdev); 2678 if (rdev->raid_disk >= 0) 2679 return -EBUSY; 2680 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2681 md_wakeup_thread(rdev->mddev->thread); 2682 } else if (rdev->mddev->pers) { 2683 /* Activating a spare .. or possibly reactivating 2684 * if we ever get bitmaps working here. 2685 */ 2686 2687 if (rdev->raid_disk != -1) 2688 return -EBUSY; 2689 2690 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 2691 return -EBUSY; 2692 2693 if (rdev->mddev->pers->hot_add_disk == NULL) 2694 return -EINVAL; 2695 2696 if (slot >= rdev->mddev->raid_disks && 2697 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2698 return -ENOSPC; 2699 2700 rdev->raid_disk = slot; 2701 if (test_bit(In_sync, &rdev->flags)) 2702 rdev->saved_raid_disk = slot; 2703 else 2704 rdev->saved_raid_disk = -1; 2705 clear_bit(In_sync, &rdev->flags); 2706 clear_bit(Bitmap_sync, &rdev->flags); 2707 err = rdev->mddev->pers-> 2708 hot_add_disk(rdev->mddev, rdev); 2709 if (err) { 2710 rdev->raid_disk = -1; 2711 return err; 2712 } else 2713 sysfs_notify_dirent_safe(rdev->sysfs_state); 2714 if (sysfs_link_rdev(rdev->mddev, rdev)) 2715 /* failure here is OK */; 2716 /* don't wakeup anyone, leave that to userspace. 
*/ 2717 } else { 2718 if (slot >= rdev->mddev->raid_disks && 2719 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2720 return -ENOSPC; 2721 rdev->raid_disk = slot; 2722 /* assume it is working */ 2723 clear_bit(Faulty, &rdev->flags); 2724 clear_bit(WriteMostly, &rdev->flags); 2725 set_bit(In_sync, &rdev->flags); 2726 sysfs_notify_dirent_safe(rdev->sysfs_state); 2727 } 2728 return len; 2729 } 2730 2731 static struct rdev_sysfs_entry rdev_slot = 2732 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2733 2734 static ssize_t 2735 offset_show(struct md_rdev *rdev, char *page) 2736 { 2737 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2738 } 2739 2740 static ssize_t 2741 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 2742 { 2743 unsigned long long offset; 2744 if (kstrtoull(buf, 10, &offset) < 0) 2745 return -EINVAL; 2746 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2747 return -EBUSY; 2748 if (rdev->sectors && rdev->mddev->external) 2749 /* Must set offset before size, so overlap checks 2750 * can be sane */ 2751 return -EBUSY; 2752 rdev->data_offset = offset; 2753 rdev->new_data_offset = offset; 2754 return len; 2755 } 2756 2757 static struct rdev_sysfs_entry rdev_offset = 2758 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2759 2760 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 2761 { 2762 return sprintf(page, "%llu\n", 2763 (unsigned long long)rdev->new_data_offset); 2764 } 2765 2766 static ssize_t new_offset_store(struct md_rdev *rdev, 2767 const char *buf, size_t len) 2768 { 2769 unsigned long long new_offset; 2770 struct mddev *mddev = rdev->mddev; 2771 2772 if (kstrtoull(buf, 10, &new_offset) < 0) 2773 return -EINVAL; 2774 2775 if (mddev->sync_thread || 2776 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 2777 return -EBUSY; 2778 if (new_offset == rdev->data_offset) 2779 /* reset is always permitted */ 2780 ; 2781 else if (new_offset > rdev->data_offset) { 2782 /* must not push array size beyond rdev_sectors */ 2783 if (new_offset - rdev->data_offset 2784 + mddev->dev_sectors > rdev->sectors) 2785 return -E2BIG; 2786 } 2787 /* Metadata worries about other space details. */ 2788 2789 /* decreasing the offset is inconsistent with a backwards 2790 * reshape. 2791 */ 2792 if (new_offset < rdev->data_offset && 2793 mddev->reshape_backwards) 2794 return -EINVAL; 2795 /* Increasing offset is inconsistent with forwards 2796 * reshape. reshape_direction should be set to 2797 * 'backwards' first. 
         */
        if (new_offset > rdev->data_offset &&
            !mddev->reshape_backwards)
                return -EINVAL;

        if (mddev->pers && mddev->persistent &&
            !super_types[mddev->major_version]
            .allow_new_offset(rdev, new_offset))
                return -E2BIG;
        rdev->new_data_offset = new_offset;
        if (new_offset > rdev->data_offset)
                mddev->reshape_backwards = 1;
        else if (new_offset < rdev->data_offset)
                mddev->reshape_backwards = 0;

        return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);

static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
        return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
        /* check if two start/length pairs overlap */
        if (s1+l1 <= s2)
                return 0;
        if (s2+l2 <= s1)
                return 0;
        return 1;
}

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
        unsigned long long blocks;
        sector_t new;

        if (kstrtoull(buf, 10, &blocks) < 0)
                return -EINVAL;

        if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
                return -EINVAL; /* sector conversion overflow */

        new = blocks * 2;
        if (new != blocks * 2)
                return -EINVAL; /* unsigned long long to sector_t overflow */

        *sectors = new;
        return 0;
}

static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
        struct mddev *my_mddev = rdev->mddev;
        sector_t oldsectors = rdev->sectors;
        sector_t sectors;

        if (strict_blocks_to_sectors(buf, &sectors) < 0)
                return -EINVAL;
        if (rdev->data_offset != rdev->new_data_offset)
                return -EINVAL; /* too confusing */
        if (my_mddev->pers && rdev->raid_disk >= 0) {
                if (my_mddev->persistent) {
                        sectors = super_types[my_mddev->major_version].
                                rdev_size_change(rdev, sectors);
                        if (!sectors)
                                return -EBUSY;
                } else if (!sectors)
                        sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
                                rdev->data_offset;
                if (!my_mddev->pers->resize)
                        /* Cannot change size for RAID0 or Linear etc */
                        return -EINVAL;
        }
        if (sectors < my_mddev->dev_sectors)
                return -EINVAL; /* component must fit device */

        rdev->sectors = sectors;
        if (sectors > oldsectors && my_mddev->external) {
                /* Need to check that all other rdevs with the same
                 * ->bdev do not overlap. 'rcu' is sufficient to walk
                 * the rdev lists safely.
                 * This check does not provide a hard guarantee, it
                 * just helps avoid dangerous mistakes.
                 */
                struct mddev *mddev;
                int overlap = 0;
                struct list_head *tmp;

                rcu_read_lock();
                for_each_mddev(mddev, tmp) {
                        struct md_rdev *rdev2;

                        rdev_for_each(rdev2, mddev)
                                if (rdev->bdev == rdev2->bdev &&
                                    rdev != rdev2 &&
                                    overlaps(rdev->data_offset, rdev->sectors,
                                             rdev2->data_offset,
                                             rdev2->sectors)) {
                                        overlap = 1;
                                        break;
                                }
                        if (overlap) {
                                mddev_put(mddev);
                                break;
                        }
                }
                rcu_read_unlock();
                if (overlap) {
                        /* Someone else could have slipped in a size
                         * change here, but doing so is just silly.
2914 * We put oldsectors back because we *know* it is 2915 * safe, and trust userspace not to race with 2916 * itself 2917 */ 2918 rdev->sectors = oldsectors; 2919 return -EBUSY; 2920 } 2921 } 2922 return len; 2923 } 2924 2925 static struct rdev_sysfs_entry rdev_size = 2926 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2927 2928 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 2929 { 2930 unsigned long long recovery_start = rdev->recovery_offset; 2931 2932 if (test_bit(In_sync, &rdev->flags) || 2933 recovery_start == MaxSector) 2934 return sprintf(page, "none\n"); 2935 2936 return sprintf(page, "%llu\n", recovery_start); 2937 } 2938 2939 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 2940 { 2941 unsigned long long recovery_start; 2942 2943 if (cmd_match(buf, "none")) 2944 recovery_start = MaxSector; 2945 else if (kstrtoull(buf, 10, &recovery_start)) 2946 return -EINVAL; 2947 2948 if (rdev->mddev->pers && 2949 rdev->raid_disk >= 0) 2950 return -EBUSY; 2951 2952 rdev->recovery_offset = recovery_start; 2953 if (recovery_start == MaxSector) 2954 set_bit(In_sync, &rdev->flags); 2955 else 2956 clear_bit(In_sync, &rdev->flags); 2957 return len; 2958 } 2959 2960 static struct rdev_sysfs_entry rdev_recovery_start = 2961 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2962 2963 static ssize_t 2964 badblocks_show(struct badblocks *bb, char *page, int unack); 2965 static ssize_t 2966 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); 2967 2968 static ssize_t bb_show(struct md_rdev *rdev, char *page) 2969 { 2970 return badblocks_show(&rdev->badblocks, page, 0); 2971 } 2972 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 2973 { 2974 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 2975 /* Maybe that ack was all we needed */ 2976 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 2977 wake_up(&rdev->blocked_wait); 2978 return rv; 2979 } 2980 static struct rdev_sysfs_entry rdev_bad_blocks = 2981 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 2982 2983 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 2984 { 2985 return badblocks_show(&rdev->badblocks, page, 1); 2986 } 2987 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 2988 { 2989 return badblocks_store(&rdev->badblocks, page, len, 1); 2990 } 2991 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 2992 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 2993 2994 static struct attribute *rdev_default_attrs[] = { 2995 &rdev_state.attr, 2996 &rdev_errors.attr, 2997 &rdev_slot.attr, 2998 &rdev_offset.attr, 2999 &rdev_new_offset.attr, 3000 &rdev_size.attr, 3001 &rdev_recovery_start.attr, 3002 &rdev_bad_blocks.attr, 3003 &rdev_unack_bad_blocks.attr, 3004 NULL, 3005 }; 3006 static ssize_t 3007 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3008 { 3009 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3010 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3011 3012 if (!entry->show) 3013 return -EIO; 3014 if (!rdev->mddev) 3015 return -EBUSY; 3016 return entry->show(rdev, page); 3017 } 3018 3019 static ssize_t 3020 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3021 const char *page, size_t length) 3022 { 3023 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3024 struct md_rdev *rdev = 
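        /* The store path below takes mddev_lock() and then re-checks
         * rdev->mddev under it: unbind_rdev_from_array() clears
         * rdev->mddev, so an attribute write may race with the device
         * being removed from the array.
         */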
container_of(kobj, struct md_rdev, kobj); 3025 ssize_t rv; 3026 struct mddev *mddev = rdev->mddev; 3027 3028 if (!entry->store) 3029 return -EIO; 3030 if (!capable(CAP_SYS_ADMIN)) 3031 return -EACCES; 3032 rv = mddev ? mddev_lock(mddev): -EBUSY; 3033 if (!rv) { 3034 if (rdev->mddev == NULL) 3035 rv = -EBUSY; 3036 else 3037 rv = entry->store(rdev, page, length); 3038 mddev_unlock(mddev); 3039 } 3040 return rv; 3041 } 3042 3043 static void rdev_free(struct kobject *ko) 3044 { 3045 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3046 kfree(rdev); 3047 } 3048 static const struct sysfs_ops rdev_sysfs_ops = { 3049 .show = rdev_attr_show, 3050 .store = rdev_attr_store, 3051 }; 3052 static struct kobj_type rdev_ktype = { 3053 .release = rdev_free, 3054 .sysfs_ops = &rdev_sysfs_ops, 3055 .default_attrs = rdev_default_attrs, 3056 }; 3057 3058 int md_rdev_init(struct md_rdev *rdev) 3059 { 3060 rdev->desc_nr = -1; 3061 rdev->saved_raid_disk = -1; 3062 rdev->raid_disk = -1; 3063 rdev->flags = 0; 3064 rdev->data_offset = 0; 3065 rdev->new_data_offset = 0; 3066 rdev->sb_events = 0; 3067 rdev->last_read_error.tv_sec = 0; 3068 rdev->last_read_error.tv_nsec = 0; 3069 rdev->sb_loaded = 0; 3070 rdev->bb_page = NULL; 3071 atomic_set(&rdev->nr_pending, 0); 3072 atomic_set(&rdev->read_errors, 0); 3073 atomic_set(&rdev->corrected_errors, 0); 3074 3075 INIT_LIST_HEAD(&rdev->same_set); 3076 init_waitqueue_head(&rdev->blocked_wait); 3077 3078 /* Add space to store bad block list. 3079 * This reserves the space even on arrays where it cannot 3080 * be used - I wonder if that matters 3081 */ 3082 rdev->badblocks.count = 0; 3083 rdev->badblocks.shift = -1; /* disabled until explicitly enabled */ 3084 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); 3085 seqlock_init(&rdev->badblocks.lock); 3086 if (rdev->badblocks.page == NULL) 3087 return -ENOMEM; 3088 3089 return 0; 3090 } 3091 EXPORT_SYMBOL_GPL(md_rdev_init); 3092 /* 3093 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3094 * 3095 * mark the device faulty if: 3096 * 3097 * - the device is nonexistent (zero size) 3098 * - the device has no valid superblock 3099 * 3100 * a faulty rdev _never_ has rdev->sb set. 3101 */ 3102 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3103 { 3104 char b[BDEVNAME_SIZE]; 3105 int err; 3106 struct md_rdev *rdev; 3107 sector_t size; 3108 3109 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3110 if (!rdev) { 3111 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 3112 return ERR_PTR(-ENOMEM); 3113 } 3114 3115 err = md_rdev_init(rdev); 3116 if (err) 3117 goto abort_free; 3118 err = alloc_disk_sb(rdev); 3119 if (err) 3120 goto abort_free; 3121 3122 err = lock_rdev(rdev, newdev, super_format == -2); 3123 if (err) 3124 goto abort_free; 3125 3126 kobject_init(&rdev->kobj, &rdev_ktype); 3127 3128 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3129 if (!size) { 3130 printk(KERN_WARNING 3131 "md: %s has zero or unknown size, marking faulty!\n", 3132 bdevname(rdev->bdev,b)); 3133 err = -EINVAL; 3134 goto abort_free; 3135 } 3136 3137 if (super_format >= 0) { 3138 err = super_types[super_format]. 
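                        /* super_format conventions used by callers such as
                         * new_dev_store() below: >= 0 reads and sanity-checks a
                         * superblock of that format, -1 skips the superblock
                         * check, and -2 also skips it but takes the shared
                         * exclusive claim used for externally managed metadata.
                         */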
3139 load_super(rdev, NULL, super_minor); 3140 if (err == -EINVAL) { 3141 printk(KERN_WARNING 3142 "md: %s does not have a valid v%d.%d " 3143 "superblock, not importing!\n", 3144 bdevname(rdev->bdev,b), 3145 super_format, super_minor); 3146 goto abort_free; 3147 } 3148 if (err < 0) { 3149 printk(KERN_WARNING 3150 "md: could not read %s's sb, not importing!\n", 3151 bdevname(rdev->bdev,b)); 3152 goto abort_free; 3153 } 3154 } 3155 3156 return rdev; 3157 3158 abort_free: 3159 if (rdev->bdev) 3160 unlock_rdev(rdev); 3161 md_rdev_clear(rdev); 3162 kfree(rdev); 3163 return ERR_PTR(err); 3164 } 3165 3166 /* 3167 * Check a full RAID array for plausibility 3168 */ 3169 3170 static void analyze_sbs(struct mddev *mddev) 3171 { 3172 int i; 3173 struct md_rdev *rdev, *freshest, *tmp; 3174 char b[BDEVNAME_SIZE]; 3175 3176 freshest = NULL; 3177 rdev_for_each_safe(rdev, tmp, mddev) 3178 switch (super_types[mddev->major_version]. 3179 load_super(rdev, freshest, mddev->minor_version)) { 3180 case 1: 3181 freshest = rdev; 3182 break; 3183 case 0: 3184 break; 3185 default: 3186 printk( KERN_ERR \ 3187 "md: fatal superblock inconsistency in %s" 3188 " -- removing from array\n", 3189 bdevname(rdev->bdev,b)); 3190 md_kick_rdev_from_array(rdev); 3191 } 3192 3193 super_types[mddev->major_version]. 3194 validate_super(mddev, freshest); 3195 3196 i = 0; 3197 rdev_for_each_safe(rdev, tmp, mddev) { 3198 if (mddev->max_disks && 3199 (rdev->desc_nr >= mddev->max_disks || 3200 i > mddev->max_disks)) { 3201 printk(KERN_WARNING 3202 "md: %s: %s: only %d devices permitted\n", 3203 mdname(mddev), bdevname(rdev->bdev, b), 3204 mddev->max_disks); 3205 md_kick_rdev_from_array(rdev); 3206 continue; 3207 } 3208 if (rdev != freshest) { 3209 if (super_types[mddev->major_version]. 3210 validate_super(mddev, rdev)) { 3211 printk(KERN_WARNING "md: kicking non-fresh %s" 3212 " from array!\n", 3213 bdevname(rdev->bdev,b)); 3214 md_kick_rdev_from_array(rdev); 3215 continue; 3216 } 3217 /* No device should have a Candidate flag 3218 * when reading devices 3219 */ 3220 if (test_bit(Candidate, &rdev->flags)) { 3221 pr_info("md: kicking Cluster Candidate %s from array!\n", 3222 bdevname(rdev->bdev, b)); 3223 md_kick_rdev_from_array(rdev); 3224 } 3225 } 3226 if (mddev->level == LEVEL_MULTIPATH) { 3227 rdev->desc_nr = i++; 3228 rdev->raid_disk = rdev->desc_nr; 3229 set_bit(In_sync, &rdev->flags); 3230 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { 3231 rdev->raid_disk = -1; 3232 clear_bit(In_sync, &rdev->flags); 3233 } 3234 } 3235 } 3236 3237 /* Read a fixed-point number. 3238 * Numbers in sysfs attributes should be in "standard" units where 3239 * possible, so time should be in seconds. 3240 * However we internally use a a much smaller unit such as 3241 * milliseconds or jiffies. 3242 * This function takes a decimal number with a possible fractional 3243 * component, and produces an integer which is the result of 3244 * multiplying that number by 10^'scale'. 3245 * all without any floating-point arithmetic. 3246 */ 3247 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 3248 { 3249 unsigned long result = 0; 3250 long decimals = -1; 3251 while (isdigit(*cp) || (*cp == '.' 
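                /* With scale == 3 (for example milliseconds derived from a
                 * value given in seconds): "0.2" -> 200, "1.5" -> 1500,
                 * "12" -> 12000; digits beyond 'scale' decimal places are
                 * ignored.
                 */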
&& decimals < 0)) { 3252 if (*cp == '.') 3253 decimals = 0; 3254 else if (decimals < scale) { 3255 unsigned int value; 3256 value = *cp - '0'; 3257 result = result * 10 + value; 3258 if (decimals >= 0) 3259 decimals++; 3260 } 3261 cp++; 3262 } 3263 if (*cp == '\n') 3264 cp++; 3265 if (*cp) 3266 return -EINVAL; 3267 if (decimals < 0) 3268 decimals = 0; 3269 while (decimals < scale) { 3270 result *= 10; 3271 decimals ++; 3272 } 3273 *res = result; 3274 return 0; 3275 } 3276 3277 static void md_safemode_timeout(unsigned long data); 3278 3279 static ssize_t 3280 safe_delay_show(struct mddev *mddev, char *page) 3281 { 3282 int msec = (mddev->safemode_delay*1000)/HZ; 3283 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3284 } 3285 static ssize_t 3286 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3287 { 3288 unsigned long msec; 3289 3290 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3291 return -EINVAL; 3292 if (msec == 0) 3293 mddev->safemode_delay = 0; 3294 else { 3295 unsigned long old_delay = mddev->safemode_delay; 3296 unsigned long new_delay = (msec*HZ)/1000; 3297 3298 if (new_delay == 0) 3299 new_delay = 1; 3300 mddev->safemode_delay = new_delay; 3301 if (new_delay < old_delay || old_delay == 0) 3302 mod_timer(&mddev->safemode_timer, jiffies+1); 3303 } 3304 return len; 3305 } 3306 static struct md_sysfs_entry md_safe_delay = 3307 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3308 3309 static ssize_t 3310 level_show(struct mddev *mddev, char *page) 3311 { 3312 struct md_personality *p; 3313 int ret; 3314 spin_lock(&mddev->lock); 3315 p = mddev->pers; 3316 if (p) 3317 ret = sprintf(page, "%s\n", p->name); 3318 else if (mddev->clevel[0]) 3319 ret = sprintf(page, "%s\n", mddev->clevel); 3320 else if (mddev->level != LEVEL_NONE) 3321 ret = sprintf(page, "%d\n", mddev->level); 3322 else 3323 ret = 0; 3324 spin_unlock(&mddev->lock); 3325 return ret; 3326 } 3327 3328 static ssize_t 3329 level_store(struct mddev *mddev, const char *buf, size_t len) 3330 { 3331 char clevel[16]; 3332 ssize_t rv; 3333 size_t slen = len; 3334 struct md_personality *pers, *oldpers; 3335 long level; 3336 void *priv, *oldpriv; 3337 struct md_rdev *rdev; 3338 3339 if (slen == 0 || slen >= sizeof(clevel)) 3340 return -EINVAL; 3341 3342 rv = mddev_lock(mddev); 3343 if (rv) 3344 return rv; 3345 3346 if (mddev->pers == NULL) { 3347 strncpy(mddev->clevel, buf, slen); 3348 if (mddev->clevel[slen-1] == '\n') 3349 slen--; 3350 mddev->clevel[slen] = 0; 3351 mddev->level = LEVEL_NONE; 3352 rv = len; 3353 goto out_unlock; 3354 } 3355 rv = -EROFS; 3356 if (mddev->ro) 3357 goto out_unlock; 3358 3359 /* request to change the personality. Need to ensure: 3360 * - array is not engaged in resync/recovery/reshape 3361 * - old personality can be suspended 3362 * - new personality will access other array. 
3363 */ 3364 3365 rv = -EBUSY; 3366 if (mddev->sync_thread || 3367 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3368 mddev->reshape_position != MaxSector || 3369 mddev->sysfs_active) 3370 goto out_unlock; 3371 3372 rv = -EINVAL; 3373 if (!mddev->pers->quiesce) { 3374 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3375 mdname(mddev), mddev->pers->name); 3376 goto out_unlock; 3377 } 3378 3379 /* Now find the new personality */ 3380 strncpy(clevel, buf, slen); 3381 if (clevel[slen-1] == '\n') 3382 slen--; 3383 clevel[slen] = 0; 3384 if (kstrtol(clevel, 10, &level)) 3385 level = LEVEL_NONE; 3386 3387 if (request_module("md-%s", clevel) != 0) 3388 request_module("md-level-%s", clevel); 3389 spin_lock(&pers_lock); 3390 pers = find_pers(level, clevel); 3391 if (!pers || !try_module_get(pers->owner)) { 3392 spin_unlock(&pers_lock); 3393 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3394 rv = -EINVAL; 3395 goto out_unlock; 3396 } 3397 spin_unlock(&pers_lock); 3398 3399 if (pers == mddev->pers) { 3400 /* Nothing to do! */ 3401 module_put(pers->owner); 3402 rv = len; 3403 goto out_unlock; 3404 } 3405 if (!pers->takeover) { 3406 module_put(pers->owner); 3407 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3408 mdname(mddev), clevel); 3409 rv = -EINVAL; 3410 goto out_unlock; 3411 } 3412 3413 rdev_for_each(rdev, mddev) 3414 rdev->new_raid_disk = rdev->raid_disk; 3415 3416 /* ->takeover must set new_* and/or delta_disks 3417 * if it succeeds, and may set them when it fails. 3418 */ 3419 priv = pers->takeover(mddev); 3420 if (IS_ERR(priv)) { 3421 mddev->new_level = mddev->level; 3422 mddev->new_layout = mddev->layout; 3423 mddev->new_chunk_sectors = mddev->chunk_sectors; 3424 mddev->raid_disks -= mddev->delta_disks; 3425 mddev->delta_disks = 0; 3426 mddev->reshape_backwards = 0; 3427 module_put(pers->owner); 3428 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3429 mdname(mddev), clevel); 3430 rv = PTR_ERR(priv); 3431 goto out_unlock; 3432 } 3433 3434 /* Looks like we have a winner */ 3435 mddev_suspend(mddev); 3436 mddev_detach(mddev); 3437 3438 spin_lock(&mddev->lock); 3439 oldpers = mddev->pers; 3440 oldpriv = mddev->private; 3441 mddev->pers = pers; 3442 mddev->private = priv; 3443 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3444 mddev->level = mddev->new_level; 3445 mddev->layout = mddev->new_layout; 3446 mddev->chunk_sectors = mddev->new_chunk_sectors; 3447 mddev->delta_disks = 0; 3448 mddev->reshape_backwards = 0; 3449 mddev->degraded = 0; 3450 spin_unlock(&mddev->lock); 3451 3452 if (oldpers->sync_request == NULL && 3453 mddev->external) { 3454 /* We are converting from a no-redundancy array 3455 * to a redundancy array and metadata is managed 3456 * externally so we need to be sure that writes 3457 * won't block due to a need to transition 3458 * clean->dirty 3459 * until external management is started. 
3460 */ 3461 mddev->in_sync = 0; 3462 mddev->safemode_delay = 0; 3463 mddev->safemode = 0; 3464 } 3465 3466 oldpers->free(mddev, oldpriv); 3467 3468 if (oldpers->sync_request == NULL && 3469 pers->sync_request != NULL) { 3470 /* need to add the md_redundancy_group */ 3471 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3472 printk(KERN_WARNING 3473 "md: cannot register extra attributes for %s\n", 3474 mdname(mddev)); 3475 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 3476 } 3477 if (oldpers->sync_request != NULL && 3478 pers->sync_request == NULL) { 3479 /* need to remove the md_redundancy_group */ 3480 if (mddev->to_remove == NULL) 3481 mddev->to_remove = &md_redundancy_group; 3482 } 3483 3484 rdev_for_each(rdev, mddev) { 3485 if (rdev->raid_disk < 0) 3486 continue; 3487 if (rdev->new_raid_disk >= mddev->raid_disks) 3488 rdev->new_raid_disk = -1; 3489 if (rdev->new_raid_disk == rdev->raid_disk) 3490 continue; 3491 sysfs_unlink_rdev(mddev, rdev); 3492 } 3493 rdev_for_each(rdev, mddev) { 3494 if (rdev->raid_disk < 0) 3495 continue; 3496 if (rdev->new_raid_disk == rdev->raid_disk) 3497 continue; 3498 rdev->raid_disk = rdev->new_raid_disk; 3499 if (rdev->raid_disk < 0) 3500 clear_bit(In_sync, &rdev->flags); 3501 else { 3502 if (sysfs_link_rdev(mddev, rdev)) 3503 printk(KERN_WARNING "md: cannot register rd%d" 3504 " for %s after level change\n", 3505 rdev->raid_disk, mdname(mddev)); 3506 } 3507 } 3508 3509 if (pers->sync_request == NULL) { 3510 /* this is now an array without redundancy, so 3511 * it must always be in_sync 3512 */ 3513 mddev->in_sync = 1; 3514 del_timer_sync(&mddev->safemode_timer); 3515 } 3516 blk_set_stacking_limits(&mddev->queue->limits); 3517 pers->run(mddev); 3518 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3519 mddev_resume(mddev); 3520 if (!mddev->thread) 3521 md_update_sb(mddev, 1); 3522 sysfs_notify(&mddev->kobj, NULL, "level"); 3523 md_new_event(mddev); 3524 rv = len; 3525 out_unlock: 3526 mddev_unlock(mddev); 3527 return rv; 3528 } 3529 3530 static struct md_sysfs_entry md_level = 3531 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3532 3533 static ssize_t 3534 layout_show(struct mddev *mddev, char *page) 3535 { 3536 /* just a number, not meaningful for all levels */ 3537 if (mddev->reshape_position != MaxSector && 3538 mddev->layout != mddev->new_layout) 3539 return sprintf(page, "%d (%d)\n", 3540 mddev->new_layout, mddev->layout); 3541 return sprintf(page, "%d\n", mddev->layout); 3542 } 3543 3544 static ssize_t 3545 layout_store(struct mddev *mddev, const char *buf, size_t len) 3546 { 3547 char *e; 3548 unsigned long n = simple_strtoul(buf, &e, 10); 3549 int err; 3550 3551 if (!*buf || (*e && *e != '\n')) 3552 return -EINVAL; 3553 err = mddev_lock(mddev); 3554 if (err) 3555 return err; 3556 3557 if (mddev->pers) { 3558 if (mddev->pers->check_reshape == NULL) 3559 err = -EBUSY; 3560 else if (mddev->ro) 3561 err = -EROFS; 3562 else { 3563 mddev->new_layout = n; 3564 err = mddev->pers->check_reshape(mddev); 3565 if (err) 3566 mddev->new_layout = mddev->layout; 3567 } 3568 } else { 3569 mddev->new_layout = n; 3570 if (mddev->reshape_position == MaxSector) 3571 mddev->layout = n; 3572 } 3573 mddev_unlock(mddev); 3574 return err ?: len; 3575 } 3576 static struct md_sysfs_entry md_layout = 3577 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3578 3579 static ssize_t 3580 raid_disks_show(struct mddev *mddev, char *page) 3581 { 3582 if (mddev->raid_disks == 0) 3583 return 0; 3584 if (mddev->reshape_position != 
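        /* During a reshape this attribute reports both values, the new
         * count first and the old one in parentheses; growing a 4-disk
         * array to 5 reads as "5 (4)".
         */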
MaxSector && 3585 mddev->delta_disks != 0) 3586 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3587 mddev->raid_disks - mddev->delta_disks); 3588 return sprintf(page, "%d\n", mddev->raid_disks); 3589 } 3590 3591 static int update_raid_disks(struct mddev *mddev, int raid_disks); 3592 3593 static ssize_t 3594 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3595 { 3596 char *e; 3597 int err; 3598 unsigned long n = simple_strtoul(buf, &e, 10); 3599 3600 if (!*buf || (*e && *e != '\n')) 3601 return -EINVAL; 3602 3603 err = mddev_lock(mddev); 3604 if (err) 3605 return err; 3606 if (mddev->pers) 3607 err = update_raid_disks(mddev, n); 3608 else if (mddev->reshape_position != MaxSector) { 3609 struct md_rdev *rdev; 3610 int olddisks = mddev->raid_disks - mddev->delta_disks; 3611 3612 err = -EINVAL; 3613 rdev_for_each(rdev, mddev) { 3614 if (olddisks < n && 3615 rdev->data_offset < rdev->new_data_offset) 3616 goto out_unlock; 3617 if (olddisks > n && 3618 rdev->data_offset > rdev->new_data_offset) 3619 goto out_unlock; 3620 } 3621 err = 0; 3622 mddev->delta_disks = n - olddisks; 3623 mddev->raid_disks = n; 3624 mddev->reshape_backwards = (mddev->delta_disks < 0); 3625 } else 3626 mddev->raid_disks = n; 3627 out_unlock: 3628 mddev_unlock(mddev); 3629 return err ? err : len; 3630 } 3631 static struct md_sysfs_entry md_raid_disks = 3632 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3633 3634 static ssize_t 3635 chunk_size_show(struct mddev *mddev, char *page) 3636 { 3637 if (mddev->reshape_position != MaxSector && 3638 mddev->chunk_sectors != mddev->new_chunk_sectors) 3639 return sprintf(page, "%d (%d)\n", 3640 mddev->new_chunk_sectors << 9, 3641 mddev->chunk_sectors << 9); 3642 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3643 } 3644 3645 static ssize_t 3646 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 3647 { 3648 int err; 3649 char *e; 3650 unsigned long n = simple_strtoul(buf, &e, 10); 3651 3652 if (!*buf || (*e && *e != '\n')) 3653 return -EINVAL; 3654 3655 err = mddev_lock(mddev); 3656 if (err) 3657 return err; 3658 if (mddev->pers) { 3659 if (mddev->pers->check_reshape == NULL) 3660 err = -EBUSY; 3661 else if (mddev->ro) 3662 err = -EROFS; 3663 else { 3664 mddev->new_chunk_sectors = n >> 9; 3665 err = mddev->pers->check_reshape(mddev); 3666 if (err) 3667 mddev->new_chunk_sectors = mddev->chunk_sectors; 3668 } 3669 } else { 3670 mddev->new_chunk_sectors = n >> 9; 3671 if (mddev->reshape_position == MaxSector) 3672 mddev->chunk_sectors = n >> 9; 3673 } 3674 mddev_unlock(mddev); 3675 return err ?: len; 3676 } 3677 static struct md_sysfs_entry md_chunk_size = 3678 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3679 3680 static ssize_t 3681 resync_start_show(struct mddev *mddev, char *page) 3682 { 3683 if (mddev->recovery_cp == MaxSector) 3684 return sprintf(page, "none\n"); 3685 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 3686 } 3687 3688 static ssize_t 3689 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 3690 { 3691 int err; 3692 char *e; 3693 unsigned long long n = simple_strtoull(buf, &e, 10); 3694 3695 err = mddev_lock(mddev); 3696 if (err) 3697 return err; 3698 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3699 err = -EBUSY; 3700 else if (cmd_match(buf, "none")) 3701 n = MaxSector; 3702 else if (!*buf || (*e && *e != '\n')) 3703 err = -EINVAL; 3704 3705 if (!err) { 3706 mddev->recovery_cp = n; 3707 if (mddev->pers) 
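                /* recovery_cp now holds either MaxSector (written as "none",
                 * meaning the array is treated as fully in sync) or the
                 * sector at which the next resync will start; for a running
                 * array, flag the change so it reaches the metadata.
                 */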
3708 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3709 } 3710 mddev_unlock(mddev); 3711 return err ?: len; 3712 } 3713 static struct md_sysfs_entry md_resync_start = 3714 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 3715 resync_start_show, resync_start_store); 3716 3717 /* 3718 * The array state can be: 3719 * 3720 * clear 3721 * No devices, no size, no level 3722 * Equivalent to STOP_ARRAY ioctl 3723 * inactive 3724 * May have some settings, but array is not active 3725 * all IO results in error 3726 * When written, doesn't tear down array, but just stops it 3727 * suspended (not supported yet) 3728 * All IO requests will block. The array can be reconfigured. 3729 * Writing this, if accepted, will block until array is quiescent 3730 * readonly 3731 * no resync can happen. no superblocks get written. 3732 * write requests fail 3733 * read-auto 3734 * like readonly, but behaves like 'clean' on a write request. 3735 * 3736 * clean - no pending writes, but otherwise active. 3737 * When written to inactive array, starts without resync 3738 * If a write request arrives then 3739 * if metadata is known, mark 'dirty' and switch to 'active'. 3740 * if not known, block and switch to write-pending 3741 * If written to an active array that has pending writes, then fails. 3742 * active 3743 * fully active: IO and resync can be happening. 3744 * When written to inactive array, starts with resync 3745 * 3746 * write-pending 3747 * clean, but writes are blocked waiting for 'active' to be written. 3748 * 3749 * active-idle 3750 * like active, but no writes have been seen for a while (100msec). 3751 * 3752 */ 3753 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 3754 write_pending, active_idle, bad_word}; 3755 static char *array_states[] = { 3756 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 3757 "write-pending", "active-idle", NULL }; 3758 3759 static int match_word(const char *word, char **list) 3760 { 3761 int n; 3762 for (n=0; list[n]; n++) 3763 if (cmd_match(word, list[n])) 3764 break; 3765 return n; 3766 } 3767 3768 static ssize_t 3769 array_state_show(struct mddev *mddev, char *page) 3770 { 3771 enum array_state st = inactive; 3772 3773 if (mddev->pers) 3774 switch(mddev->ro) { 3775 case 1: 3776 st = readonly; 3777 break; 3778 case 2: 3779 st = read_auto; 3780 break; 3781 case 0: 3782 if (mddev->in_sync) 3783 st = clean; 3784 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 3785 st = write_pending; 3786 else if (mddev->safemode) 3787 st = active_idle; 3788 else 3789 st = active; 3790 } 3791 else { 3792 if (list_empty(&mddev->disks) && 3793 mddev->raid_disks == 0 && 3794 mddev->dev_sectors == 0) 3795 st = clear; 3796 else 3797 st = inactive; 3798 } 3799 return sprintf(page, "%s\n", array_states[st]); 3800 } 3801 3802 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 3803 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 3804 static int do_md_run(struct mddev *mddev); 3805 static int restart_array(struct mddev *mddev); 3806 3807 static ssize_t 3808 array_state_store(struct mddev *mddev, const char *buf, size_t len) 3809 { 3810 int err; 3811 enum array_state st = match_word(buf, array_states); 3812 3813 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 3814 /* don't take reconfig_mutex when toggling between 3815 * clean and active 3816 */ 3817 spin_lock(&mddev->lock); 3818 if (st == active) { 3819 restart_array(mddev); 3820 clear_bit(MD_CHANGE_PENDING, 
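                          /* Illustrative use of this attribute (path assumed):
                           *   echo active > /sys/block/md0/md/array_state
                           *   echo clean  > /sys/block/md0/md/array_state
                           * both transitions are handled on this fast path
                           * without taking the reconfig mutex.
                           */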
&mddev->flags); 3821 wake_up(&mddev->sb_wait); 3822 err = 0; 3823 } else /* st == clean */ { 3824 restart_array(mddev); 3825 if (atomic_read(&mddev->writes_pending) == 0) { 3826 if (mddev->in_sync == 0) { 3827 mddev->in_sync = 1; 3828 if (mddev->safemode == 1) 3829 mddev->safemode = 0; 3830 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3831 } 3832 err = 0; 3833 } else 3834 err = -EBUSY; 3835 } 3836 spin_unlock(&mddev->lock); 3837 return err; 3838 } 3839 err = mddev_lock(mddev); 3840 if (err) 3841 return err; 3842 err = -EINVAL; 3843 switch(st) { 3844 case bad_word: 3845 break; 3846 case clear: 3847 /* stopping an active array */ 3848 err = do_md_stop(mddev, 0, NULL); 3849 break; 3850 case inactive: 3851 /* stopping an active array */ 3852 if (mddev->pers) 3853 err = do_md_stop(mddev, 2, NULL); 3854 else 3855 err = 0; /* already inactive */ 3856 break; 3857 case suspended: 3858 break; /* not supported yet */ 3859 case readonly: 3860 if (mddev->pers) 3861 err = md_set_readonly(mddev, NULL); 3862 else { 3863 mddev->ro = 1; 3864 set_disk_ro(mddev->gendisk, 1); 3865 err = do_md_run(mddev); 3866 } 3867 break; 3868 case read_auto: 3869 if (mddev->pers) { 3870 if (mddev->ro == 0) 3871 err = md_set_readonly(mddev, NULL); 3872 else if (mddev->ro == 1) 3873 err = restart_array(mddev); 3874 if (err == 0) { 3875 mddev->ro = 2; 3876 set_disk_ro(mddev->gendisk, 0); 3877 } 3878 } else { 3879 mddev->ro = 2; 3880 err = do_md_run(mddev); 3881 } 3882 break; 3883 case clean: 3884 if (mddev->pers) { 3885 restart_array(mddev); 3886 spin_lock(&mddev->lock); 3887 if (atomic_read(&mddev->writes_pending) == 0) { 3888 if (mddev->in_sync == 0) { 3889 mddev->in_sync = 1; 3890 if (mddev->safemode == 1) 3891 mddev->safemode = 0; 3892 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3893 } 3894 err = 0; 3895 } else 3896 err = -EBUSY; 3897 spin_unlock(&mddev->lock); 3898 } else 3899 err = -EINVAL; 3900 break; 3901 case active: 3902 if (mddev->pers) { 3903 restart_array(mddev); 3904 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3905 wake_up(&mddev->sb_wait); 3906 err = 0; 3907 } else { 3908 mddev->ro = 0; 3909 set_disk_ro(mddev->gendisk, 0); 3910 err = do_md_run(mddev); 3911 } 3912 break; 3913 case write_pending: 3914 case active_idle: 3915 /* these cannot be set */ 3916 break; 3917 } 3918 3919 if (!err) { 3920 if (mddev->hold_active == UNTIL_IOCTL) 3921 mddev->hold_active = 0; 3922 sysfs_notify_dirent_safe(mddev->sysfs_state); 3923 } 3924 mddev_unlock(mddev); 3925 return err ?: len; 3926 } 3927 static struct md_sysfs_entry md_array_state = 3928 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3929 3930 static ssize_t 3931 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3932 return sprintf(page, "%d\n", 3933 atomic_read(&mddev->max_corr_read_errors)); 3934 } 3935 3936 static ssize_t 3937 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 3938 { 3939 char *e; 3940 unsigned long n = simple_strtoul(buf, &e, 10); 3941 3942 if (*buf && (*e == 0 || *e == '\n')) { 3943 atomic_set(&mddev->max_corr_read_errors, n); 3944 return len; 3945 } 3946 return -EINVAL; 3947 } 3948 3949 static struct md_sysfs_entry max_corr_read_errors = 3950 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 3951 max_corrected_read_errors_store); 3952 3953 static ssize_t 3954 null_show(struct mddev *mddev, char *page) 3955 { 3956 return -EINVAL; 3957 } 3958 3959 static ssize_t 3960 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 3961 { 3962 /* buf must 
be %d:%d\n? giving major and minor numbers */ 3963 /* The new device is added to the array. 3964 * If the array has a persistent superblock, we read the 3965 * superblock to initialise info and check validity. 3966 * Otherwise, only checking done is that in bind_rdev_to_array, 3967 * which mainly checks size. 3968 */ 3969 char *e; 3970 int major = simple_strtoul(buf, &e, 10); 3971 int minor; 3972 dev_t dev; 3973 struct md_rdev *rdev; 3974 int err; 3975 3976 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3977 return -EINVAL; 3978 minor = simple_strtoul(e+1, &e, 10); 3979 if (*e && *e != '\n') 3980 return -EINVAL; 3981 dev = MKDEV(major, minor); 3982 if (major != MAJOR(dev) || 3983 minor != MINOR(dev)) 3984 return -EOVERFLOW; 3985 3986 flush_workqueue(md_misc_wq); 3987 3988 err = mddev_lock(mddev); 3989 if (err) 3990 return err; 3991 if (mddev->persistent) { 3992 rdev = md_import_device(dev, mddev->major_version, 3993 mddev->minor_version); 3994 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3995 struct md_rdev *rdev0 3996 = list_entry(mddev->disks.next, 3997 struct md_rdev, same_set); 3998 err = super_types[mddev->major_version] 3999 .load_super(rdev, rdev0, mddev->minor_version); 4000 if (err < 0) 4001 goto out; 4002 } 4003 } else if (mddev->external) 4004 rdev = md_import_device(dev, -2, -1); 4005 else 4006 rdev = md_import_device(dev, -1, -1); 4007 4008 if (IS_ERR(rdev)) 4009 return PTR_ERR(rdev); 4010 err = bind_rdev_to_array(rdev, mddev); 4011 out: 4012 if (err) 4013 export_rdev(rdev); 4014 mddev_unlock(mddev); 4015 return err ? err : len; 4016 } 4017 4018 static struct md_sysfs_entry md_new_device = 4019 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4020 4021 static ssize_t 4022 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4023 { 4024 char *end; 4025 unsigned long chunk, end_chunk; 4026 int err; 4027 4028 err = mddev_lock(mddev); 4029 if (err) 4030 return err; 4031 if (!mddev->bitmap) 4032 goto out; 4033 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4034 while (*buf) { 4035 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4036 if (buf == end) break; 4037 if (*end == '-') { /* range */ 4038 buf = end + 1; 4039 end_chunk = simple_strtoul(buf, &end, 0); 4040 if (buf == end) break; 4041 } 4042 if (*end && !isspace(*end)) break; 4043 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4044 buf = skip_spaces(end); 4045 } 4046 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4047 out: 4048 mddev_unlock(mddev); 4049 return len; 4050 } 4051 4052 static struct md_sysfs_entry md_bitmap = 4053 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4054 4055 static ssize_t 4056 size_show(struct mddev *mddev, char *page) 4057 { 4058 return sprintf(page, "%llu\n", 4059 (unsigned long long)mddev->dev_sectors / 2); 4060 } 4061 4062 static int update_size(struct mddev *mddev, sector_t num_sectors); 4063 4064 static ssize_t 4065 size_store(struct mddev *mddev, const char *buf, size_t len) 4066 { 4067 /* If array is inactive, we can reduce the component size, but 4068 * not increase it (except from 0). 
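 * (the value written here is in KiB / 1K "blocks"; it is converted to
 * 512-byte sectors before being stored in mddev->dev_sectors or handed
 * to update_size() for an active array)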
4069 * If array is active, we can try an on-line resize
4070 */
4071 sector_t sectors;
4072 int err = strict_blocks_to_sectors(buf, &sectors);
4073
4074 if (err < 0)
4075 return err;
4076 err = mddev_lock(mddev);
4077 if (err)
4078 return err;
4079 if (mddev->pers) {
4080 if (mddev_is_clustered(mddev))
4081 md_cluster_ops->metadata_update_start(mddev);
4082 err = update_size(mddev, sectors);
4083 md_update_sb(mddev, 1);
4084 if (mddev_is_clustered(mddev))
4085 md_cluster_ops->metadata_update_finish(mddev);
4086 } else {
4087 if (mddev->dev_sectors == 0 ||
4088 mddev->dev_sectors > sectors)
4089 mddev->dev_sectors = sectors;
4090 else
4091 err = -ENOSPC;
4092 }
4093 mddev_unlock(mddev);
4094 return err ? err : len;
4095 }
4096
4097 static struct md_sysfs_entry md_size =
4098 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4099
4100 /* Metadata version.
4101 * This is one of
4102 * 'none' for arrays with no metadata (good luck...)
4103 * 'external' for arrays with externally managed metadata,
4104 * or N.M for internally known formats
4105 */
4106 static ssize_t
4107 metadata_show(struct mddev *mddev, char *page)
4108 {
4109 if (mddev->persistent)
4110 return sprintf(page, "%d.%d\n",
4111 mddev->major_version, mddev->minor_version);
4112 else if (mddev->external)
4113 return sprintf(page, "external:%s\n", mddev->metadata_type);
4114 else
4115 return sprintf(page, "none\n");
4116 }
4117
4118 static ssize_t
4119 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4120 {
4121 int major, minor;
4122 char *e;
4123 int err;
4124 /* Changing the details of 'external' metadata is
4125 * always permitted. Otherwise there must be
4126 * no devices attached to the array.
4127 */
4128
4129 err = mddev_lock(mddev);
4130 if (err)
4131 return err;
4132 err = -EBUSY;
4133 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4134 ;
4135 else if (!list_empty(&mddev->disks))
4136 goto out_unlock;
4137
4138 err = 0;
4139 if (cmd_match(buf, "none")) {
4140 mddev->persistent = 0;
4141 mddev->external = 0;
4142 mddev->major_version = 0;
4143 mddev->minor_version = 90;
4144 goto out_unlock;
4145 }
4146 if (strncmp(buf, "external:", 9) == 0) {
4147 size_t namelen = len-9;
4148 if (namelen >= sizeof(mddev->metadata_type))
4149 namelen = sizeof(mddev->metadata_type)-1;
4150 strncpy(mddev->metadata_type, buf+9, namelen);
4151 mddev->metadata_type[namelen] = 0;
4152 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4153 mddev->metadata_type[--namelen] = 0;
4154 mddev->persistent = 0;
4155 mddev->external = 1;
4156 mddev->major_version = 0;
4157 mddev->minor_version = 90;
4158 goto out_unlock;
4159 }
4160 major = simple_strtoul(buf, &e, 10);
4161 err = -EINVAL;
4162 if (e==buf || *e != '.')
4163 goto out_unlock;
4164 buf = e+1;
4165 minor = simple_strtoul(buf, &e, 10);
4166 if (e==buf || (*e && *e != '\n') )
4167 goto out_unlock;
4168 err = -ENOENT;
4169 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4170 goto out_unlock;
4171 mddev->major_version = major;
4172 mddev->minor_version = minor;
4173 mddev->persistent = 1;
4174 mddev->external = 0;
4175 err = 0;
4176 out_unlock:
4177 mddev_unlock(mddev);
4178 return err ?: len;
4179 }
4180
4181 static struct md_sysfs_entry md_metadata =
4182 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4183
4184 static ssize_t
4185 action_show(struct mddev *mddev, char *page)
4186 {
4187 char *type = "idle";
4188 unsigned long recovery = mddev->recovery;
4189 if (test_bit(MD_RECOVERY_FROZEN,
&recovery)) 4190 type = "frozen"; 4191 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4192 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4193 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4194 type = "reshape"; 4195 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4196 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4197 type = "resync"; 4198 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4199 type = "check"; 4200 else 4201 type = "repair"; 4202 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4203 type = "recover"; 4204 } 4205 return sprintf(page, "%s\n", type); 4206 } 4207 4208 static ssize_t 4209 action_store(struct mddev *mddev, const char *page, size_t len) 4210 { 4211 if (!mddev->pers || !mddev->pers->sync_request) 4212 return -EINVAL; 4213 4214 if (cmd_match(page, "frozen")) 4215 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4216 else 4217 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4218 4219 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4220 flush_workqueue(md_misc_wq); 4221 if (mddev->sync_thread) { 4222 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4223 if (mddev_lock(mddev) == 0) { 4224 md_reap_sync_thread(mddev); 4225 mddev_unlock(mddev); 4226 } 4227 } 4228 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4229 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4230 return -EBUSY; 4231 else if (cmd_match(page, "resync")) 4232 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4233 else if (cmd_match(page, "recover")) { 4234 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4235 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4236 } else if (cmd_match(page, "reshape")) { 4237 int err; 4238 if (mddev->pers->start_reshape == NULL) 4239 return -EINVAL; 4240 err = mddev_lock(mddev); 4241 if (!err) { 4242 err = mddev->pers->start_reshape(mddev); 4243 mddev_unlock(mddev); 4244 } 4245 if (err) 4246 return err; 4247 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4248 } else { 4249 if (cmd_match(page, "check")) 4250 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4251 else if (!cmd_match(page, "repair")) 4252 return -EINVAL; 4253 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4254 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4255 } 4256 if (mddev->ro == 2) { 4257 /* A write to sync_action is enough to justify 4258 * canceling read-auto mode 4259 */ 4260 mddev->ro = 0; 4261 md_wakeup_thread(mddev->sync_thread); 4262 } 4263 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4264 md_wakeup_thread(mddev->thread); 4265 sysfs_notify_dirent_safe(mddev->sysfs_action); 4266 return len; 4267 } 4268 4269 static struct md_sysfs_entry md_scan_mode = 4270 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4271 4272 static ssize_t 4273 last_sync_action_show(struct mddev *mddev, char *page) 4274 { 4275 return sprintf(page, "%s\n", mddev->last_sync_action); 4276 } 4277 4278 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4279 4280 static ssize_t 4281 mismatch_cnt_show(struct mddev *mddev, char *page) 4282 { 4283 return sprintf(page, "%llu\n", 4284 (unsigned long long) 4285 atomic64_read(&mddev->resync_mismatches)); 4286 } 4287 4288 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4289 4290 static ssize_t 4291 sync_min_show(struct mddev *mddev, char *page) 4292 { 4293 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4294 mddev->sync_speed_min ? 
"local": "system"); 4295 } 4296 4297 static ssize_t 4298 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4299 { 4300 int min; 4301 char *e; 4302 if (strncmp(buf, "system", 6)==0) { 4303 mddev->sync_speed_min = 0; 4304 return len; 4305 } 4306 min = simple_strtoul(buf, &e, 10); 4307 if (buf == e || (*e && *e != '\n') || min <= 0) 4308 return -EINVAL; 4309 mddev->sync_speed_min = min; 4310 return len; 4311 } 4312 4313 static struct md_sysfs_entry md_sync_min = 4314 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4315 4316 static ssize_t 4317 sync_max_show(struct mddev *mddev, char *page) 4318 { 4319 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4320 mddev->sync_speed_max ? "local": "system"); 4321 } 4322 4323 static ssize_t 4324 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4325 { 4326 int max; 4327 char *e; 4328 if (strncmp(buf, "system", 6)==0) { 4329 mddev->sync_speed_max = 0; 4330 return len; 4331 } 4332 max = simple_strtoul(buf, &e, 10); 4333 if (buf == e || (*e && *e != '\n') || max <= 0) 4334 return -EINVAL; 4335 mddev->sync_speed_max = max; 4336 return len; 4337 } 4338 4339 static struct md_sysfs_entry md_sync_max = 4340 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4341 4342 static ssize_t 4343 degraded_show(struct mddev *mddev, char *page) 4344 { 4345 return sprintf(page, "%d\n", mddev->degraded); 4346 } 4347 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4348 4349 static ssize_t 4350 sync_force_parallel_show(struct mddev *mddev, char *page) 4351 { 4352 return sprintf(page, "%d\n", mddev->parallel_resync); 4353 } 4354 4355 static ssize_t 4356 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4357 { 4358 long n; 4359 4360 if (kstrtol(buf, 10, &n)) 4361 return -EINVAL; 4362 4363 if (n != 0 && n != 1) 4364 return -EINVAL; 4365 4366 mddev->parallel_resync = n; 4367 4368 if (mddev->sync_thread) 4369 wake_up(&resync_wait); 4370 4371 return len; 4372 } 4373 4374 /* force parallel resync, even with shared block devices */ 4375 static struct md_sysfs_entry md_sync_force_parallel = 4376 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4377 sync_force_parallel_show, sync_force_parallel_store); 4378 4379 static ssize_t 4380 sync_speed_show(struct mddev *mddev, char *page) 4381 { 4382 unsigned long resync, dt, db; 4383 if (mddev->curr_resync == 0) 4384 return sprintf(page, "none\n"); 4385 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4386 dt = (jiffies - mddev->resync_mark) / HZ; 4387 if (!dt) dt++; 4388 db = resync - mddev->resync_mark_cnt; 4389 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4390 } 4391 4392 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4393 4394 static ssize_t 4395 sync_completed_show(struct mddev *mddev, char *page) 4396 { 4397 unsigned long long max_sectors, resync; 4398 4399 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4400 return sprintf(page, "none\n"); 4401 4402 if (mddev->curr_resync == 1 || 4403 mddev->curr_resync == 2) 4404 return sprintf(page, "delayed\n"); 4405 4406 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4407 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4408 max_sectors = mddev->resync_max_sectors; 4409 else 4410 max_sectors = mddev->dev_sectors; 4411 4412 resync = mddev->curr_resync_completed; 4413 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4414 } 4415 4416 static struct md_sysfs_entry md_sync_completed = 4417 __ATTR_PREALLOC(sync_completed, 
S_IRUGO, sync_completed_show, NULL); 4418 4419 static ssize_t 4420 min_sync_show(struct mddev *mddev, char *page) 4421 { 4422 return sprintf(page, "%llu\n", 4423 (unsigned long long)mddev->resync_min); 4424 } 4425 static ssize_t 4426 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4427 { 4428 unsigned long long min; 4429 int err; 4430 4431 if (kstrtoull(buf, 10, &min)) 4432 return -EINVAL; 4433 4434 spin_lock(&mddev->lock); 4435 err = -EINVAL; 4436 if (min > mddev->resync_max) 4437 goto out_unlock; 4438 4439 err = -EBUSY; 4440 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4441 goto out_unlock; 4442 4443 /* Round down to multiple of 4K for safety */ 4444 mddev->resync_min = round_down(min, 8); 4445 err = 0; 4446 4447 out_unlock: 4448 spin_unlock(&mddev->lock); 4449 return err ?: len; 4450 } 4451 4452 static struct md_sysfs_entry md_min_sync = 4453 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4454 4455 static ssize_t 4456 max_sync_show(struct mddev *mddev, char *page) 4457 { 4458 if (mddev->resync_max == MaxSector) 4459 return sprintf(page, "max\n"); 4460 else 4461 return sprintf(page, "%llu\n", 4462 (unsigned long long)mddev->resync_max); 4463 } 4464 static ssize_t 4465 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4466 { 4467 int err; 4468 spin_lock(&mddev->lock); 4469 if (strncmp(buf, "max", 3) == 0) 4470 mddev->resync_max = MaxSector; 4471 else { 4472 unsigned long long max; 4473 int chunk; 4474 4475 err = -EINVAL; 4476 if (kstrtoull(buf, 10, &max)) 4477 goto out_unlock; 4478 if (max < mddev->resync_min) 4479 goto out_unlock; 4480 4481 err = -EBUSY; 4482 if (max < mddev->resync_max && 4483 mddev->ro == 0 && 4484 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4485 goto out_unlock; 4486 4487 /* Must be a multiple of chunk_size */ 4488 chunk = mddev->chunk_sectors; 4489 if (chunk) { 4490 sector_t temp = max; 4491 4492 err = -EINVAL; 4493 if (sector_div(temp, chunk)) 4494 goto out_unlock; 4495 } 4496 mddev->resync_max = max; 4497 } 4498 wake_up(&mddev->recovery_wait); 4499 err = 0; 4500 out_unlock: 4501 spin_unlock(&mddev->lock); 4502 return err ?: len; 4503 } 4504 4505 static struct md_sysfs_entry md_max_sync = 4506 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4507 4508 static ssize_t 4509 suspend_lo_show(struct mddev *mddev, char *page) 4510 { 4511 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4512 } 4513 4514 static ssize_t 4515 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4516 { 4517 char *e; 4518 unsigned long long new = simple_strtoull(buf, &e, 10); 4519 unsigned long long old; 4520 int err; 4521 4522 if (buf == e || (*e && *e != '\n')) 4523 return -EINVAL; 4524 4525 err = mddev_lock(mddev); 4526 if (err) 4527 return err; 4528 err = -EINVAL; 4529 if (mddev->pers == NULL || 4530 mddev->pers->quiesce == NULL) 4531 goto unlock; 4532 old = mddev->suspend_lo; 4533 mddev->suspend_lo = new; 4534 if (new >= old) 4535 /* Shrinking suspended region */ 4536 mddev->pers->quiesce(mddev, 2); 4537 else { 4538 /* Expanding suspended region - need to wait */ 4539 mddev->pers->quiesce(mddev, 1); 4540 mddev->pers->quiesce(mddev, 0); 4541 } 4542 err = 0; 4543 unlock: 4544 mddev_unlock(mddev); 4545 return err ?: len; 4546 } 4547 static struct md_sysfs_entry md_suspend_lo = 4548 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4549 4550 static ssize_t 4551 suspend_hi_show(struct mddev *mddev, char *page) 4552 { 4553 return sprintf(page, "%llu\n", 
(unsigned long long)mddev->suspend_hi);
4554 }
4555
4556 static ssize_t
4557 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4558 {
4559 char *e;
4560 unsigned long long new = simple_strtoull(buf, &e, 10);
4561 unsigned long long old;
4562 int err;
4563
4564 if (buf == e || (*e && *e != '\n'))
4565 return -EINVAL;
4566
4567 err = mddev_lock(mddev);
4568 if (err)
4569 return err;
4570 err = -EINVAL;
4571 if (mddev->pers == NULL ||
4572 mddev->pers->quiesce == NULL)
4573 goto unlock;
4574 old = mddev->suspend_hi;
4575 mddev->suspend_hi = new;
4576 if (new <= old)
4577 /* Shrinking suspended region */
4578 mddev->pers->quiesce(mddev, 2);
4579 else {
4580 /* Expanding suspended region - need to wait */
4581 mddev->pers->quiesce(mddev, 1);
4582 mddev->pers->quiesce(mddev, 0);
4583 }
4584 err = 0;
4585 unlock:
4586 mddev_unlock(mddev);
4587 return err ?: len;
4588 }
4589 static struct md_sysfs_entry md_suspend_hi =
4590 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
4591
4592 static ssize_t
4593 reshape_position_show(struct mddev *mddev, char *page)
4594 {
4595 if (mddev->reshape_position != MaxSector)
4596 return sprintf(page, "%llu\n",
4597 (unsigned long long)mddev->reshape_position);
4598 strcpy(page, "none\n");
4599 return 5;
4600 }
4601
4602 static ssize_t
4603 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4604 {
4605 struct md_rdev *rdev;
4606 char *e;
4607 int err;
4608 unsigned long long new = simple_strtoull(buf, &e, 10);
4609
4610 if (buf == e || (*e && *e != '\n'))
4611 return -EINVAL;
4612 err = mddev_lock(mddev);
4613 if (err)
4614 return err;
4615 err = -EBUSY;
4616 if (mddev->pers)
4617 goto unlock;
4618 mddev->reshape_position = new;
4619 mddev->delta_disks = 0;
4620 mddev->reshape_backwards = 0;
4621 mddev->new_level = mddev->level;
4622 mddev->new_layout = mddev->layout;
4623 mddev->new_chunk_sectors = mddev->chunk_sectors;
4624 rdev_for_each(rdev, mddev)
4625 rdev->new_data_offset = rdev->data_offset;
4626 err = 0;
4627 unlock:
4628 mddev_unlock(mddev);
4629 return err ?: len;
4630 }
4631
4632 static struct md_sysfs_entry md_reshape_position =
4633 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4634 reshape_position_store);
4635
4636 static ssize_t
4637 reshape_direction_show(struct mddev *mddev, char *page)
4638 {
4639 return sprintf(page, "%s\n",
4640 mddev->reshape_backwards ? "backwards" : "forwards");
4641 }
4642
4643 static ssize_t
4644 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4645 {
4646 int backwards = 0;
4647 int err;
4648
4649 if (cmd_match(buf, "forwards"))
4650 backwards = 0;
4651 else if (cmd_match(buf, "backwards"))
4652 backwards = 1;
4653 else
4654 return -EINVAL;
4655 if (mddev->reshape_backwards == backwards)
4656 return len;
4657
4658 err = mddev_lock(mddev);
4659 if (err)
4660 return err;
4661 /* check if we are allowed to change */
4662 if (mddev->delta_disks)
4663 err = -EBUSY;
4664 else if (mddev->persistent &&
4665 mddev->major_version == 0)
4666 err = -EINVAL;
4667 else
4668 mddev->reshape_backwards = backwards;
4669 mddev_unlock(mddev);
4670 return err ?: len;
4671 }
4672
4673 static struct md_sysfs_entry md_reshape_direction =
4674 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4675 reshape_direction_store);
4676
4677 static ssize_t
4678 array_size_show(struct mddev *mddev, char *page)
4679 {
4680 if (mddev->external_size)
4681 return sprintf(page, "%llu\n",
4682 (unsigned long long)mddev->array_sectors/2);
4683 else
4684 return sprintf(page, "default\n");
4685 }
4686
4687 static ssize_t
4688 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4689 {
4690 sector_t sectors;
4691 int err;
4692
4693 err = mddev_lock(mddev);
4694 if (err)
4695 return err;
4696
4697 if (strncmp(buf, "default", 7) == 0) {
4698 if (mddev->pers)
4699 sectors = mddev->pers->size(mddev, 0, 0);
4700 else
4701 sectors = mddev->array_sectors;
4702
4703 mddev->external_size = 0;
4704 } else {
4705 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4706 err = -EINVAL;
4707 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4708 err = -E2BIG;
4709 else
4710 mddev->external_size = 1;
4711 }
4712
4713 if (!err) {
4714 mddev->array_sectors = sectors;
4715 if (mddev->pers) {
4716 set_capacity(mddev->gendisk, mddev->array_sectors);
4717 revalidate_disk(mddev->gendisk);
4718 }
4719 }
4720 mddev_unlock(mddev);
4721 return err ?: len;
4722 }
4723
4724 static struct md_sysfs_entry md_array_size =
4725 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4726 array_size_store);
4727
4728 static struct attribute *md_default_attrs[] = {
4729 &md_level.attr,
4730 &md_layout.attr,
4731 &md_raid_disks.attr,
4732 &md_chunk_size.attr,
4733 &md_size.attr,
4734 &md_resync_start.attr,
4735 &md_metadata.attr,
4736 &md_new_device.attr,
4737 &md_safe_delay.attr,
4738 &md_array_state.attr,
4739 &md_reshape_position.attr,
4740 &md_reshape_direction.attr,
4741 &md_array_size.attr,
4742 &max_corr_read_errors.attr,
4743 NULL,
4744 };
4745
4746 static struct attribute *md_redundancy_attrs[] = {
4747 &md_scan_mode.attr,
4748 &md_last_scan_mode.attr,
4749 &md_mismatches.attr,
4750 &md_sync_min.attr,
4751 &md_sync_max.attr,
4752 &md_sync_speed.attr,
4753 &md_sync_force_parallel.attr,
4754 &md_sync_completed.attr,
4755 &md_min_sync.attr,
4756 &md_max_sync.attr,
4757 &md_suspend_lo.attr,
4758 &md_suspend_hi.attr,
4759 &md_bitmap.attr,
4760 &md_degraded.attr,
4761 NULL,
4762 };
4763 static struct attribute_group md_redundancy_group = {
4764 .name = NULL,
4765 .attrs = md_redundancy_attrs,
4766 };
4767
4768 static ssize_t
4769 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4770 {
4771 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4772 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4773 ssize_t rv;
4774
4775 if (!entry->show)
4776 return -EIO;
4777
spin_lock(&all_mddevs_lock); 4778 if (list_empty(&mddev->all_mddevs)) { 4779 spin_unlock(&all_mddevs_lock); 4780 return -EBUSY; 4781 } 4782 mddev_get(mddev); 4783 spin_unlock(&all_mddevs_lock); 4784 4785 rv = entry->show(mddev, page); 4786 mddev_put(mddev); 4787 return rv; 4788 } 4789 4790 static ssize_t 4791 md_attr_store(struct kobject *kobj, struct attribute *attr, 4792 const char *page, size_t length) 4793 { 4794 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4795 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4796 ssize_t rv; 4797 4798 if (!entry->store) 4799 return -EIO; 4800 if (!capable(CAP_SYS_ADMIN)) 4801 return -EACCES; 4802 spin_lock(&all_mddevs_lock); 4803 if (list_empty(&mddev->all_mddevs)) { 4804 spin_unlock(&all_mddevs_lock); 4805 return -EBUSY; 4806 } 4807 mddev_get(mddev); 4808 spin_unlock(&all_mddevs_lock); 4809 rv = entry->store(mddev, page, length); 4810 mddev_put(mddev); 4811 return rv; 4812 } 4813 4814 static void md_free(struct kobject *ko) 4815 { 4816 struct mddev *mddev = container_of(ko, struct mddev, kobj); 4817 4818 if (mddev->sysfs_state) 4819 sysfs_put(mddev->sysfs_state); 4820 4821 if (mddev->gendisk) { 4822 del_gendisk(mddev->gendisk); 4823 put_disk(mddev->gendisk); 4824 } 4825 if (mddev->queue) 4826 blk_cleanup_queue(mddev->queue); 4827 4828 kfree(mddev); 4829 } 4830 4831 static const struct sysfs_ops md_sysfs_ops = { 4832 .show = md_attr_show, 4833 .store = md_attr_store, 4834 }; 4835 static struct kobj_type md_ktype = { 4836 .release = md_free, 4837 .sysfs_ops = &md_sysfs_ops, 4838 .default_attrs = md_default_attrs, 4839 }; 4840 4841 int mdp_major = 0; 4842 4843 static void mddev_delayed_delete(struct work_struct *ws) 4844 { 4845 struct mddev *mddev = container_of(ws, struct mddev, del_work); 4846 4847 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4848 kobject_del(&mddev->kobj); 4849 kobject_put(&mddev->kobj); 4850 } 4851 4852 static int md_alloc(dev_t dev, char *name) 4853 { 4854 static DEFINE_MUTEX(disks_mutex); 4855 struct mddev *mddev = mddev_find(dev); 4856 struct gendisk *disk; 4857 int partitioned; 4858 int shift; 4859 int unit; 4860 int error; 4861 4862 if (!mddev) 4863 return -ENODEV; 4864 4865 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4866 shift = partitioned ? MdpMinorShift : 0; 4867 unit = MINOR(mddev->unit) >> shift; 4868 4869 /* wait for any previous instance of this device to be 4870 * completely removed (mddev_delayed_delete). 4871 */ 4872 flush_workqueue(md_misc_wq); 4873 4874 mutex_lock(&disks_mutex); 4875 error = -EEXIST; 4876 if (mddev->gendisk) 4877 goto abort; 4878 4879 if (name) { 4880 /* Need to ensure that 'name' is not a duplicate. 
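 * (walk all_mddevs under all_mddevs_lock and compare each registered
 * gendisk name against the requested one)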
4881 */ 4882 struct mddev *mddev2; 4883 spin_lock(&all_mddevs_lock); 4884 4885 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4886 if (mddev2->gendisk && 4887 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4888 spin_unlock(&all_mddevs_lock); 4889 goto abort; 4890 } 4891 spin_unlock(&all_mddevs_lock); 4892 } 4893 4894 error = -ENOMEM; 4895 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4896 if (!mddev->queue) 4897 goto abort; 4898 mddev->queue->queuedata = mddev; 4899 4900 blk_queue_make_request(mddev->queue, md_make_request); 4901 blk_set_stacking_limits(&mddev->queue->limits); 4902 4903 disk = alloc_disk(1 << shift); 4904 if (!disk) { 4905 blk_cleanup_queue(mddev->queue); 4906 mddev->queue = NULL; 4907 goto abort; 4908 } 4909 disk->major = MAJOR(mddev->unit); 4910 disk->first_minor = unit << shift; 4911 if (name) 4912 strcpy(disk->disk_name, name); 4913 else if (partitioned) 4914 sprintf(disk->disk_name, "md_d%d", unit); 4915 else 4916 sprintf(disk->disk_name, "md%d", unit); 4917 disk->fops = &md_fops; 4918 disk->private_data = mddev; 4919 disk->queue = mddev->queue; 4920 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); 4921 /* Allow extended partitions. This makes the 4922 * 'mdp' device redundant, but we can't really 4923 * remove it now. 4924 */ 4925 disk->flags |= GENHD_FL_EXT_DEVT; 4926 mddev->gendisk = disk; 4927 /* As soon as we call add_disk(), another thread could get 4928 * through to md_open, so make sure it doesn't get too far 4929 */ 4930 mutex_lock(&mddev->open_mutex); 4931 add_disk(disk); 4932 4933 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4934 &disk_to_dev(disk)->kobj, "%s", "md"); 4935 if (error) { 4936 /* This isn't possible, but as kobject_init_and_add is marked 4937 * __must_check, we must do something with the result 4938 */ 4939 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4940 disk->disk_name); 4941 error = 0; 4942 } 4943 if (mddev->kobj.sd && 4944 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4945 printk(KERN_DEBUG "pointless warning\n"); 4946 mutex_unlock(&mddev->open_mutex); 4947 abort: 4948 mutex_unlock(&disks_mutex); 4949 if (!error && mddev->kobj.sd) { 4950 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4951 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4952 } 4953 mddev_put(mddev); 4954 return error; 4955 } 4956 4957 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4958 { 4959 md_alloc(dev, NULL); 4960 return NULL; 4961 } 4962 4963 static int add_named_array(const char *val, struct kernel_param *kp) 4964 { 4965 /* val must be "md_*" where * is not all digits. 4966 * We allocate an array with a large free minor number, and 4967 * set the name to val. val must not already be an active name. 
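 * (this handler is typically wired up as the md_mod "new_array" module
 * parameter, so e.g. writing "md_home" to
 * /sys/module/md_mod/parameters/new_array allocates an array with that name)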
4968 */ 4969 int len = strlen(val); 4970 char buf[DISK_NAME_LEN]; 4971 4972 while (len && val[len-1] == '\n') 4973 len--; 4974 if (len >= DISK_NAME_LEN) 4975 return -E2BIG; 4976 strlcpy(buf, val, len+1); 4977 if (strncmp(buf, "md_", 3) != 0) 4978 return -EINVAL; 4979 return md_alloc(0, buf); 4980 } 4981 4982 static void md_safemode_timeout(unsigned long data) 4983 { 4984 struct mddev *mddev = (struct mddev *) data; 4985 4986 if (!atomic_read(&mddev->writes_pending)) { 4987 mddev->safemode = 1; 4988 if (mddev->external) 4989 sysfs_notify_dirent_safe(mddev->sysfs_state); 4990 } 4991 md_wakeup_thread(mddev->thread); 4992 } 4993 4994 static int start_dirty_degraded; 4995 4996 int md_run(struct mddev *mddev) 4997 { 4998 int err; 4999 struct md_rdev *rdev; 5000 struct md_personality *pers; 5001 5002 if (list_empty(&mddev->disks)) 5003 /* cannot run an array with no devices.. */ 5004 return -EINVAL; 5005 5006 if (mddev->pers) 5007 return -EBUSY; 5008 /* Cannot run until previous stop completes properly */ 5009 if (mddev->sysfs_active) 5010 return -EBUSY; 5011 5012 /* 5013 * Analyze all RAID superblock(s) 5014 */ 5015 if (!mddev->raid_disks) { 5016 if (!mddev->persistent) 5017 return -EINVAL; 5018 analyze_sbs(mddev); 5019 } 5020 5021 if (mddev->level != LEVEL_NONE) 5022 request_module("md-level-%d", mddev->level); 5023 else if (mddev->clevel[0]) 5024 request_module("md-%s", mddev->clevel); 5025 5026 /* 5027 * Drop all container device buffers, from now on 5028 * the only valid external interface is through the md 5029 * device. 5030 */ 5031 rdev_for_each(rdev, mddev) { 5032 if (test_bit(Faulty, &rdev->flags)) 5033 continue; 5034 sync_blockdev(rdev->bdev); 5035 invalidate_bdev(rdev->bdev); 5036 5037 /* perform some consistency tests on the device. 5038 * We don't want the data to overlap the metadata, 5039 * Internal Bitmap issues have been handled elsewhere. 5040 */ 5041 if (rdev->meta_bdev) { 5042 /* Nothing to check */; 5043 } else if (rdev->data_offset < rdev->sb_start) { 5044 if (mddev->dev_sectors && 5045 rdev->data_offset + mddev->dev_sectors 5046 > rdev->sb_start) { 5047 printk("md: %s: data overlaps metadata\n", 5048 mdname(mddev)); 5049 return -EINVAL; 5050 } 5051 } else { 5052 if (rdev->sb_start + rdev->sb_size/512 5053 > rdev->data_offset) { 5054 printk("md: %s: metadata overlaps data\n", 5055 mdname(mddev)); 5056 return -EINVAL; 5057 } 5058 } 5059 sysfs_notify_dirent_safe(rdev->sysfs_state); 5060 } 5061 5062 if (mddev->bio_set == NULL) 5063 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0); 5064 5065 spin_lock(&pers_lock); 5066 pers = find_pers(mddev->level, mddev->clevel); 5067 if (!pers || !try_module_get(pers->owner)) { 5068 spin_unlock(&pers_lock); 5069 if (mddev->level != LEVEL_NONE) 5070 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 5071 mddev->level); 5072 else 5073 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 5074 mddev->clevel); 5075 return -EINVAL; 5076 } 5077 spin_unlock(&pers_lock); 5078 if (mddev->level != pers->level) { 5079 mddev->level = pers->level; 5080 mddev->new_level = pers->level; 5081 } 5082 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5083 5084 if (mddev->reshape_position != MaxSector && 5085 pers->start_reshape == NULL) { 5086 /* This personality cannot handle reshaping... */ 5087 module_put(pers->owner); 5088 return -EINVAL; 5089 } 5090 5091 if (pers->sync_request) { 5092 /* Warn if this is a potentially silly 5093 * configuration. 
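 * (i.e. two component rdevs whose block devices sit on the same
 * underlying physical disk, which gives no real protection against
 * that disk failing)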
5094 */ 5095 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5096 struct md_rdev *rdev2; 5097 int warned = 0; 5098 5099 rdev_for_each(rdev, mddev) 5100 rdev_for_each(rdev2, mddev) { 5101 if (rdev < rdev2 && 5102 rdev->bdev->bd_contains == 5103 rdev2->bdev->bd_contains) { 5104 printk(KERN_WARNING 5105 "%s: WARNING: %s appears to be" 5106 " on the same physical disk as" 5107 " %s.\n", 5108 mdname(mddev), 5109 bdevname(rdev->bdev,b), 5110 bdevname(rdev2->bdev,b2)); 5111 warned = 1; 5112 } 5113 } 5114 5115 if (warned) 5116 printk(KERN_WARNING 5117 "True protection against single-disk" 5118 " failure might be compromised.\n"); 5119 } 5120 5121 mddev->recovery = 0; 5122 /* may be over-ridden by personality */ 5123 mddev->resync_max_sectors = mddev->dev_sectors; 5124 5125 mddev->ok_start_degraded = start_dirty_degraded; 5126 5127 if (start_readonly && mddev->ro == 0) 5128 mddev->ro = 2; /* read-only, but switch on first write */ 5129 5130 err = pers->run(mddev); 5131 if (err) 5132 printk(KERN_ERR "md: pers->run() failed ...\n"); 5133 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5134 WARN_ONCE(!mddev->external_size, "%s: default size too small," 5135 " but 'external_size' not in effect?\n", __func__); 5136 printk(KERN_ERR 5137 "md: invalid array_size %llu > default size %llu\n", 5138 (unsigned long long)mddev->array_sectors / 2, 5139 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5140 err = -EINVAL; 5141 } 5142 if (err == 0 && pers->sync_request && 5143 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5144 struct bitmap *bitmap; 5145 5146 bitmap = bitmap_create(mddev, -1); 5147 if (IS_ERR(bitmap)) { 5148 err = PTR_ERR(bitmap); 5149 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 5150 mdname(mddev), err); 5151 } else 5152 mddev->bitmap = bitmap; 5153 5154 } 5155 if (err) { 5156 mddev_detach(mddev); 5157 if (mddev->private) 5158 pers->free(mddev, mddev->private); 5159 module_put(pers->owner); 5160 bitmap_destroy(mddev); 5161 return err; 5162 } 5163 if (mddev->queue) { 5164 mddev->queue->backing_dev_info.congested_data = mddev; 5165 mddev->queue->backing_dev_info.congested_fn = md_congested; 5166 blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec); 5167 } 5168 if (pers->sync_request) { 5169 if (mddev->kobj.sd && 5170 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 5171 printk(KERN_WARNING 5172 "md: cannot register extra attributes for %s\n", 5173 mdname(mddev)); 5174 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 5175 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 5176 mddev->ro = 0; 5177 5178 atomic_set(&mddev->writes_pending,0); 5179 atomic_set(&mddev->max_corr_read_errors, 5180 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 5181 mddev->safemode = 0; 5182 mddev->safemode_timer.function = md_safemode_timeout; 5183 mddev->safemode_timer.data = (unsigned long) mddev; 5184 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 5185 mddev->in_sync = 1; 5186 smp_wmb(); 5187 spin_lock(&mddev->lock); 5188 mddev->pers = pers; 5189 mddev->ready = 1; 5190 spin_unlock(&mddev->lock); 5191 rdev_for_each(rdev, mddev) 5192 if (rdev->raid_disk >= 0) 5193 if (sysfs_link_rdev(mddev, rdev)) 5194 /* failure here is OK */; 5195 5196 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5197 5198 if (mddev->flags & MD_UPDATE_SB_FLAGS) 5199 md_update_sb(mddev, 0); 5200 5201 md_new_event(mddev); 5202 sysfs_notify_dirent_safe(mddev->sysfs_state); 5203 sysfs_notify_dirent_safe(mddev->sysfs_action); 5204 sysfs_notify(&mddev->kobj, NULL, "degraded"); 
5205 return 0; 5206 } 5207 EXPORT_SYMBOL_GPL(md_run); 5208 5209 static int do_md_run(struct mddev *mddev) 5210 { 5211 int err; 5212 5213 err = md_run(mddev); 5214 if (err) 5215 goto out; 5216 err = bitmap_load(mddev); 5217 if (err) { 5218 bitmap_destroy(mddev); 5219 goto out; 5220 } 5221 5222 md_wakeup_thread(mddev->thread); 5223 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 5224 5225 set_capacity(mddev->gendisk, mddev->array_sectors); 5226 revalidate_disk(mddev->gendisk); 5227 mddev->changed = 1; 5228 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5229 out: 5230 return err; 5231 } 5232 5233 static int restart_array(struct mddev *mddev) 5234 { 5235 struct gendisk *disk = mddev->gendisk; 5236 5237 /* Complain if it has no devices */ 5238 if (list_empty(&mddev->disks)) 5239 return -ENXIO; 5240 if (!mddev->pers) 5241 return -EINVAL; 5242 if (!mddev->ro) 5243 return -EBUSY; 5244 mddev->safemode = 0; 5245 mddev->ro = 0; 5246 set_disk_ro(disk, 0); 5247 printk(KERN_INFO "md: %s switched to read-write mode.\n", 5248 mdname(mddev)); 5249 /* Kick recovery or resync if necessary */ 5250 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5251 md_wakeup_thread(mddev->thread); 5252 md_wakeup_thread(mddev->sync_thread); 5253 sysfs_notify_dirent_safe(mddev->sysfs_state); 5254 return 0; 5255 } 5256 5257 static void md_clean(struct mddev *mddev) 5258 { 5259 mddev->array_sectors = 0; 5260 mddev->external_size = 0; 5261 mddev->dev_sectors = 0; 5262 mddev->raid_disks = 0; 5263 mddev->recovery_cp = 0; 5264 mddev->resync_min = 0; 5265 mddev->resync_max = MaxSector; 5266 mddev->reshape_position = MaxSector; 5267 mddev->external = 0; 5268 mddev->persistent = 0; 5269 mddev->level = LEVEL_NONE; 5270 mddev->clevel[0] = 0; 5271 mddev->flags = 0; 5272 mddev->ro = 0; 5273 mddev->metadata_type[0] = 0; 5274 mddev->chunk_sectors = 0; 5275 mddev->ctime = mddev->utime = 0; 5276 mddev->layout = 0; 5277 mddev->max_disks = 0; 5278 mddev->events = 0; 5279 mddev->can_decrease_events = 0; 5280 mddev->delta_disks = 0; 5281 mddev->reshape_backwards = 0; 5282 mddev->new_level = LEVEL_NONE; 5283 mddev->new_layout = 0; 5284 mddev->new_chunk_sectors = 0; 5285 mddev->curr_resync = 0; 5286 atomic64_set(&mddev->resync_mismatches, 0); 5287 mddev->suspend_lo = mddev->suspend_hi = 0; 5288 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5289 mddev->recovery = 0; 5290 mddev->in_sync = 0; 5291 mddev->changed = 0; 5292 mddev->degraded = 0; 5293 mddev->safemode = 0; 5294 mddev->merge_check_needed = 0; 5295 mddev->bitmap_info.offset = 0; 5296 mddev->bitmap_info.default_offset = 0; 5297 mddev->bitmap_info.default_space = 0; 5298 mddev->bitmap_info.chunksize = 0; 5299 mddev->bitmap_info.daemon_sleep = 0; 5300 mddev->bitmap_info.max_write_behind = 0; 5301 } 5302 5303 static void __md_stop_writes(struct mddev *mddev) 5304 { 5305 if (mddev_is_clustered(mddev)) 5306 md_cluster_ops->metadata_update_start(mddev); 5307 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5308 flush_workqueue(md_misc_wq); 5309 if (mddev->sync_thread) { 5310 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5311 md_reap_sync_thread(mddev); 5312 } 5313 5314 del_timer_sync(&mddev->safemode_timer); 5315 5316 bitmap_flush(mddev); 5317 md_super_wait(mddev); 5318 5319 if (mddev->ro == 0 && 5320 (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) { 5321 /* mark array as shutdown cleanly */ 5322 mddev->in_sync = 1; 5323 md_update_sb(mddev, 1); 5324 } 5325 if (mddev_is_clustered(mddev)) 5326 md_cluster_ops->metadata_update_finish(mddev); 5327 } 
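/*
 * md_stop_writes() - stop any further writes and get the metadata into a
 * clean, flushed state.  It simply takes the mddev lock around
 * __md_stop_writes() so that external callers (dm-raid, for example, via
 * the exported symbol) do not need to hold the lock themselves.
 */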
5328 5329 void md_stop_writes(struct mddev *mddev) 5330 { 5331 mddev_lock_nointr(mddev); 5332 __md_stop_writes(mddev); 5333 mddev_unlock(mddev); 5334 } 5335 EXPORT_SYMBOL_GPL(md_stop_writes); 5336 5337 static void mddev_detach(struct mddev *mddev) 5338 { 5339 struct bitmap *bitmap = mddev->bitmap; 5340 /* wait for behind writes to complete */ 5341 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { 5342 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n", 5343 mdname(mddev)); 5344 /* need to kick something here to make sure I/O goes? */ 5345 wait_event(bitmap->behind_wait, 5346 atomic_read(&bitmap->behind_writes) == 0); 5347 } 5348 if (mddev->pers && mddev->pers->quiesce) { 5349 mddev->pers->quiesce(mddev, 1); 5350 mddev->pers->quiesce(mddev, 0); 5351 } 5352 md_unregister_thread(&mddev->thread); 5353 if (mddev->queue) 5354 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5355 } 5356 5357 static void __md_stop(struct mddev *mddev) 5358 { 5359 struct md_personality *pers = mddev->pers; 5360 mddev_detach(mddev); 5361 spin_lock(&mddev->lock); 5362 mddev->ready = 0; 5363 mddev->pers = NULL; 5364 spin_unlock(&mddev->lock); 5365 pers->free(mddev, mddev->private); 5366 if (pers->sync_request && mddev->to_remove == NULL) 5367 mddev->to_remove = &md_redundancy_group; 5368 module_put(pers->owner); 5369 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5370 } 5371 5372 void md_stop(struct mddev *mddev) 5373 { 5374 /* stop the array and free an attached data structures. 5375 * This is called from dm-raid 5376 */ 5377 __md_stop(mddev); 5378 bitmap_destroy(mddev); 5379 if (mddev->bio_set) 5380 bioset_free(mddev->bio_set); 5381 } 5382 5383 EXPORT_SYMBOL_GPL(md_stop); 5384 5385 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 5386 { 5387 int err = 0; 5388 int did_freeze = 0; 5389 5390 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5391 did_freeze = 1; 5392 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5393 md_wakeup_thread(mddev->thread); 5394 } 5395 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5396 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5397 if (mddev->sync_thread) 5398 /* Thread might be blocked waiting for metadata update 5399 * which will now never happen */ 5400 wake_up_process(mddev->sync_thread->tsk); 5401 5402 mddev_unlock(mddev); 5403 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 5404 &mddev->recovery)); 5405 mddev_lock_nointr(mddev); 5406 5407 mutex_lock(&mddev->open_mutex); 5408 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5409 mddev->sync_thread || 5410 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 5411 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5412 printk("md: %s still in use.\n",mdname(mddev)); 5413 if (did_freeze) { 5414 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5415 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5416 md_wakeup_thread(mddev->thread); 5417 } 5418 err = -EBUSY; 5419 goto out; 5420 } 5421 if (mddev->pers) { 5422 __md_stop_writes(mddev); 5423 5424 err = -ENXIO; 5425 if (mddev->ro==1) 5426 goto out; 5427 mddev->ro = 1; 5428 set_disk_ro(mddev->gendisk, 1); 5429 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5430 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5431 md_wakeup_thread(mddev->thread); 5432 sysfs_notify_dirent_safe(mddev->sysfs_state); 5433 err = 0; 5434 } 5435 out: 5436 mutex_unlock(&mddev->open_mutex); 5437 return err; 5438 } 5439 5440 /* mode: 5441 * 0 - completely stop and dis-assemble array 5442 * 2 - stop but do not 
disassemble array 5443 */ 5444 static int do_md_stop(struct mddev *mddev, int mode, 5445 struct block_device *bdev) 5446 { 5447 struct gendisk *disk = mddev->gendisk; 5448 struct md_rdev *rdev; 5449 int did_freeze = 0; 5450 5451 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5452 did_freeze = 1; 5453 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5454 md_wakeup_thread(mddev->thread); 5455 } 5456 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5457 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5458 if (mddev->sync_thread) 5459 /* Thread might be blocked waiting for metadata update 5460 * which will now never happen */ 5461 wake_up_process(mddev->sync_thread->tsk); 5462 5463 mddev_unlock(mddev); 5464 wait_event(resync_wait, (mddev->sync_thread == NULL && 5465 !test_bit(MD_RECOVERY_RUNNING, 5466 &mddev->recovery))); 5467 mddev_lock_nointr(mddev); 5468 5469 mutex_lock(&mddev->open_mutex); 5470 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5471 mddev->sysfs_active || 5472 mddev->sync_thread || 5473 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 5474 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5475 printk("md: %s still in use.\n",mdname(mddev)); 5476 mutex_unlock(&mddev->open_mutex); 5477 if (did_freeze) { 5478 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5479 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5480 md_wakeup_thread(mddev->thread); 5481 } 5482 return -EBUSY; 5483 } 5484 if (mddev->pers) { 5485 if (mddev->ro) 5486 set_disk_ro(disk, 0); 5487 5488 __md_stop_writes(mddev); 5489 __md_stop(mddev); 5490 mddev->queue->merge_bvec_fn = NULL; 5491 mddev->queue->backing_dev_info.congested_fn = NULL; 5492 5493 /* tell userspace to handle 'inactive' */ 5494 sysfs_notify_dirent_safe(mddev->sysfs_state); 5495 5496 rdev_for_each(rdev, mddev) 5497 if (rdev->raid_disk >= 0) 5498 sysfs_unlink_rdev(mddev, rdev); 5499 5500 set_capacity(disk, 0); 5501 mutex_unlock(&mddev->open_mutex); 5502 mddev->changed = 1; 5503 revalidate_disk(disk); 5504 5505 if (mddev->ro) 5506 mddev->ro = 0; 5507 } else 5508 mutex_unlock(&mddev->open_mutex); 5509 /* 5510 * Free resources if final stop 5511 */ 5512 if (mode == 0) { 5513 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 5514 5515 bitmap_destroy(mddev); 5516 if (mddev->bitmap_info.file) { 5517 struct file *f = mddev->bitmap_info.file; 5518 spin_lock(&mddev->lock); 5519 mddev->bitmap_info.file = NULL; 5520 spin_unlock(&mddev->lock); 5521 fput(f); 5522 } 5523 mddev->bitmap_info.offset = 0; 5524 5525 export_array(mddev); 5526 5527 md_clean(mddev); 5528 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5529 if (mddev->hold_active == UNTIL_STOP) 5530 mddev->hold_active = 0; 5531 } 5532 blk_integrity_unregister(disk); 5533 md_new_event(mddev); 5534 sysfs_notify_dirent_safe(mddev->sysfs_state); 5535 return 0; 5536 } 5537 5538 #ifndef MODULE 5539 static void autorun_array(struct mddev *mddev) 5540 { 5541 struct md_rdev *rdev; 5542 int err; 5543 5544 if (list_empty(&mddev->disks)) 5545 return; 5546 5547 printk(KERN_INFO "md: running: "); 5548 5549 rdev_for_each(rdev, mddev) { 5550 char b[BDEVNAME_SIZE]; 5551 printk("<%s>", bdevname(rdev->bdev,b)); 5552 } 5553 printk("\n"); 5554 5555 err = do_md_run(mddev); 5556 if (err) { 5557 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 5558 do_md_stop(mddev, 0, NULL); 5559 } 5560 } 5561 5562 /* 5563 * lets try to run arrays based on all disks that have arrived 5564 * until now. 
(those are in pending_raid_disks) 5565 * 5566 * the method: pick the first pending disk, collect all disks with 5567 * the same UUID, remove all from the pending list and put them into 5568 * the 'same_array' list. Then order this list based on superblock 5569 * update time (freshest comes first), kick out 'old' disks and 5570 * compare superblocks. If everything's fine then run it. 5571 * 5572 * If "unit" is allocated, then bump its reference count 5573 */ 5574 static void autorun_devices(int part) 5575 { 5576 struct md_rdev *rdev0, *rdev, *tmp; 5577 struct mddev *mddev; 5578 char b[BDEVNAME_SIZE]; 5579 5580 printk(KERN_INFO "md: autorun ...\n"); 5581 while (!list_empty(&pending_raid_disks)) { 5582 int unit; 5583 dev_t dev; 5584 LIST_HEAD(candidates); 5585 rdev0 = list_entry(pending_raid_disks.next, 5586 struct md_rdev, same_set); 5587 5588 printk(KERN_INFO "md: considering %s ...\n", 5589 bdevname(rdev0->bdev,b)); 5590 INIT_LIST_HEAD(&candidates); 5591 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 5592 if (super_90_load(rdev, rdev0, 0) >= 0) { 5593 printk(KERN_INFO "md: adding %s ...\n", 5594 bdevname(rdev->bdev,b)); 5595 list_move(&rdev->same_set, &candidates); 5596 } 5597 /* 5598 * now we have a set of devices, with all of them having 5599 * mostly sane superblocks. It's time to allocate the 5600 * mddev. 5601 */ 5602 if (part) { 5603 dev = MKDEV(mdp_major, 5604 rdev0->preferred_minor << MdpMinorShift); 5605 unit = MINOR(dev) >> MdpMinorShift; 5606 } else { 5607 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 5608 unit = MINOR(dev); 5609 } 5610 if (rdev0->preferred_minor != unit) { 5611 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 5612 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 5613 break; 5614 } 5615 5616 md_probe(dev, NULL, NULL); 5617 mddev = mddev_find(dev); 5618 if (!mddev || !mddev->gendisk) { 5619 if (mddev) 5620 mddev_put(mddev); 5621 printk(KERN_ERR 5622 "md: cannot allocate memory for md drive.\n"); 5623 break; 5624 } 5625 if (mddev_lock(mddev)) 5626 printk(KERN_WARNING "md: %s locked, cannot run\n", 5627 mdname(mddev)); 5628 else if (mddev->raid_disks || mddev->major_version 5629 || !list_empty(&mddev->disks)) { 5630 printk(KERN_WARNING 5631 "md: %s already running, cannot run %s\n", 5632 mdname(mddev), bdevname(rdev0->bdev,b)); 5633 mddev_unlock(mddev); 5634 } else { 5635 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 5636 mddev->persistent = 1; 5637 rdev_for_each_list(rdev, tmp, &candidates) { 5638 list_del_init(&rdev->same_set); 5639 if (bind_rdev_to_array(rdev, mddev)) 5640 export_rdev(rdev); 5641 } 5642 autorun_array(mddev); 5643 mddev_unlock(mddev); 5644 } 5645 /* on success, candidates will be empty, on error 5646 * it won't... 5647 */ 5648 rdev_for_each_list(rdev, tmp, &candidates) { 5649 list_del_init(&rdev->same_set); 5650 export_rdev(rdev); 5651 } 5652 mddev_put(mddev); 5653 } 5654 printk(KERN_INFO "md: ... 
autorun DONE.\n"); 5655 } 5656 #endif /* !MODULE */ 5657 5658 static int get_version(void __user *arg) 5659 { 5660 mdu_version_t ver; 5661 5662 ver.major = MD_MAJOR_VERSION; 5663 ver.minor = MD_MINOR_VERSION; 5664 ver.patchlevel = MD_PATCHLEVEL_VERSION; 5665 5666 if (copy_to_user(arg, &ver, sizeof(ver))) 5667 return -EFAULT; 5668 5669 return 0; 5670 } 5671 5672 static int get_array_info(struct mddev *mddev, void __user *arg) 5673 { 5674 mdu_array_info_t info; 5675 int nr,working,insync,failed,spare; 5676 struct md_rdev *rdev; 5677 5678 nr = working = insync = failed = spare = 0; 5679 rcu_read_lock(); 5680 rdev_for_each_rcu(rdev, mddev) { 5681 nr++; 5682 if (test_bit(Faulty, &rdev->flags)) 5683 failed++; 5684 else { 5685 working++; 5686 if (test_bit(In_sync, &rdev->flags)) 5687 insync++; 5688 else 5689 spare++; 5690 } 5691 } 5692 rcu_read_unlock(); 5693 5694 info.major_version = mddev->major_version; 5695 info.minor_version = mddev->minor_version; 5696 info.patch_version = MD_PATCHLEVEL_VERSION; 5697 info.ctime = mddev->ctime; 5698 info.level = mddev->level; 5699 info.size = mddev->dev_sectors / 2; 5700 if (info.size != mddev->dev_sectors / 2) /* overflow */ 5701 info.size = -1; 5702 info.nr_disks = nr; 5703 info.raid_disks = mddev->raid_disks; 5704 info.md_minor = mddev->md_minor; 5705 info.not_persistent= !mddev->persistent; 5706 5707 info.utime = mddev->utime; 5708 info.state = 0; 5709 if (mddev->in_sync) 5710 info.state = (1<<MD_SB_CLEAN); 5711 if (mddev->bitmap && mddev->bitmap_info.offset) 5712 info.state |= (1<<MD_SB_BITMAP_PRESENT); 5713 if (mddev_is_clustered(mddev)) 5714 info.state |= (1<<MD_SB_CLUSTERED); 5715 info.active_disks = insync; 5716 info.working_disks = working; 5717 info.failed_disks = failed; 5718 info.spare_disks = spare; 5719 5720 info.layout = mddev->layout; 5721 info.chunk_size = mddev->chunk_sectors << 9; 5722 5723 if (copy_to_user(arg, &info, sizeof(info))) 5724 return -EFAULT; 5725 5726 return 0; 5727 } 5728 5729 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 5730 { 5731 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5732 char *ptr; 5733 int err; 5734 5735 file = kmalloc(sizeof(*file), GFP_NOIO); 5736 if (!file) 5737 return -ENOMEM; 5738 5739 err = 0; 5740 spin_lock(&mddev->lock); 5741 /* bitmap disabled, zero the first byte and copy out */ 5742 if (!mddev->bitmap_info.file) 5743 file->pathname[0] = '\0'; 5744 else if ((ptr = d_path(&mddev->bitmap_info.file->f_path, 5745 file->pathname, sizeof(file->pathname))), 5746 IS_ERR(ptr)) 5747 err = PTR_ERR(ptr); 5748 else 5749 memmove(file->pathname, ptr, 5750 sizeof(file->pathname)-(ptr-file->pathname)); 5751 spin_unlock(&mddev->lock); 5752 5753 if (err == 0 && 5754 copy_to_user(arg, file, sizeof(*file))) 5755 err = -EFAULT; 5756 5757 kfree(file); 5758 return err; 5759 } 5760 5761 static int get_disk_info(struct mddev *mddev, void __user * arg) 5762 { 5763 mdu_disk_info_t info; 5764 struct md_rdev *rdev; 5765 5766 if (copy_from_user(&info, arg, sizeof(info))) 5767 return -EFAULT; 5768 5769 rcu_read_lock(); 5770 rdev = md_find_rdev_nr_rcu(mddev, info.number); 5771 if (rdev) { 5772 info.major = MAJOR(rdev->bdev->bd_dev); 5773 info.minor = MINOR(rdev->bdev->bd_dev); 5774 info.raid_disk = rdev->raid_disk; 5775 info.state = 0; 5776 if (test_bit(Faulty, &rdev->flags)) 5777 info.state |= (1<<MD_DISK_FAULTY); 5778 else if (test_bit(In_sync, &rdev->flags)) { 5779 info.state |= (1<<MD_DISK_ACTIVE); 5780 info.state |= (1<<MD_DISK_SYNC); 5781 } 5782 if (test_bit(WriteMostly, &rdev->flags)) 
5783 info.state |= (1<<MD_DISK_WRITEMOSTLY); 5784 } else { 5785 info.major = info.minor = 0; 5786 info.raid_disk = -1; 5787 info.state = (1<<MD_DISK_REMOVED); 5788 } 5789 rcu_read_unlock(); 5790 5791 if (copy_to_user(arg, &info, sizeof(info))) 5792 return -EFAULT; 5793 5794 return 0; 5795 } 5796 5797 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) 5798 { 5799 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5800 struct md_rdev *rdev; 5801 dev_t dev = MKDEV(info->major,info->minor); 5802 5803 if (mddev_is_clustered(mddev) && 5804 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 5805 pr_err("%s: Cannot add to clustered mddev.\n", 5806 mdname(mddev)); 5807 return -EINVAL; 5808 } 5809 5810 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5811 return -EOVERFLOW; 5812 5813 if (!mddev->raid_disks) { 5814 int err; 5815 /* expecting a device which has a superblock */ 5816 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5817 if (IS_ERR(rdev)) { 5818 printk(KERN_WARNING 5819 "md: md_import_device returned %ld\n", 5820 PTR_ERR(rdev)); 5821 return PTR_ERR(rdev); 5822 } 5823 if (!list_empty(&mddev->disks)) { 5824 struct md_rdev *rdev0 5825 = list_entry(mddev->disks.next, 5826 struct md_rdev, same_set); 5827 err = super_types[mddev->major_version] 5828 .load_super(rdev, rdev0, mddev->minor_version); 5829 if (err < 0) { 5830 printk(KERN_WARNING 5831 "md: %s has different UUID to %s\n", 5832 bdevname(rdev->bdev,b), 5833 bdevname(rdev0->bdev,b2)); 5834 export_rdev(rdev); 5835 return -EINVAL; 5836 } 5837 } 5838 err = bind_rdev_to_array(rdev, mddev); 5839 if (err) 5840 export_rdev(rdev); 5841 return err; 5842 } 5843 5844 /* 5845 * add_new_disk can be used once the array is assembled 5846 * to add "hot spares". They must already have a superblock 5847 * written 5848 */ 5849 if (mddev->pers) { 5850 int err; 5851 if (!mddev->pers->hot_add_disk) { 5852 printk(KERN_WARNING 5853 "%s: personality does not support diskops!\n", 5854 mdname(mddev)); 5855 return -EINVAL; 5856 } 5857 if (mddev->persistent) 5858 rdev = md_import_device(dev, mddev->major_version, 5859 mddev->minor_version); 5860 else 5861 rdev = md_import_device(dev, -1, -1); 5862 if (IS_ERR(rdev)) { 5863 printk(KERN_WARNING 5864 "md: md_import_device returned %ld\n", 5865 PTR_ERR(rdev)); 5866 return PTR_ERR(rdev); 5867 } 5868 /* set saved_raid_disk if appropriate */ 5869 if (!mddev->persistent) { 5870 if (info->state & (1<<MD_DISK_SYNC) && 5871 info->raid_disk < mddev->raid_disks) { 5872 rdev->raid_disk = info->raid_disk; 5873 set_bit(In_sync, &rdev->flags); 5874 clear_bit(Bitmap_sync, &rdev->flags); 5875 } else 5876 rdev->raid_disk = -1; 5877 rdev->saved_raid_disk = rdev->raid_disk; 5878 } else 5879 super_types[mddev->major_version]. 5880 validate_super(mddev, rdev); 5881 if ((info->state & (1<<MD_DISK_SYNC)) && 5882 rdev->raid_disk != info->raid_disk) { 5883 /* This was a hot-add request, but events doesn't 5884 * match, so reject it. 
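 * (validate_super() filled in rdev->raid_disk from the device's own
 * superblock; if that disagrees with the slot userspace asked for, the
 * metadata on this device is presumably stale)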
5885 */ 5886 export_rdev(rdev); 5887 return -EINVAL; 5888 } 5889 5890 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5891 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5892 set_bit(WriteMostly, &rdev->flags); 5893 else 5894 clear_bit(WriteMostly, &rdev->flags); 5895 5896 /* 5897 * check whether the device shows up in other nodes 5898 */ 5899 if (mddev_is_clustered(mddev)) { 5900 if (info->state & (1 << MD_DISK_CANDIDATE)) { 5901 /* Through --cluster-confirm */ 5902 set_bit(Candidate, &rdev->flags); 5903 err = md_cluster_ops->new_disk_ack(mddev, true); 5904 if (err) { 5905 export_rdev(rdev); 5906 return err; 5907 } 5908 } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 5909 /* --add initiated by this node */ 5910 err = md_cluster_ops->add_new_disk_start(mddev, rdev); 5911 if (err) { 5912 md_cluster_ops->add_new_disk_finish(mddev); 5913 export_rdev(rdev); 5914 return err; 5915 } 5916 } 5917 } 5918 5919 rdev->raid_disk = -1; 5920 err = bind_rdev_to_array(rdev, mddev); 5921 if (err) 5922 export_rdev(rdev); 5923 else 5924 err = add_bound_rdev(rdev); 5925 if (mddev_is_clustered(mddev) && 5926 (info->state & (1 << MD_DISK_CLUSTER_ADD))) 5927 md_cluster_ops->add_new_disk_finish(mddev); 5928 return err; 5929 } 5930 5931 /* otherwise, add_new_disk is only allowed 5932 * for major_version==0 superblocks 5933 */ 5934 if (mddev->major_version != 0) { 5935 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5936 mdname(mddev)); 5937 return -EINVAL; 5938 } 5939 5940 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5941 int err; 5942 rdev = md_import_device(dev, -1, 0); 5943 if (IS_ERR(rdev)) { 5944 printk(KERN_WARNING 5945 "md: error, md_import_device() returned %ld\n", 5946 PTR_ERR(rdev)); 5947 return PTR_ERR(rdev); 5948 } 5949 rdev->desc_nr = info->number; 5950 if (info->raid_disk < mddev->raid_disks) 5951 rdev->raid_disk = info->raid_disk; 5952 else 5953 rdev->raid_disk = -1; 5954 5955 if (rdev->raid_disk < mddev->raid_disks) 5956 if (info->state & (1<<MD_DISK_SYNC)) 5957 set_bit(In_sync, &rdev->flags); 5958 5959 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5960 set_bit(WriteMostly, &rdev->flags); 5961 5962 if (!mddev->persistent) { 5963 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5964 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5965 } else 5966 rdev->sb_start = calc_dev_sboffset(rdev); 5967 rdev->sectors = rdev->sb_start; 5968 5969 err = bind_rdev_to_array(rdev, mddev); 5970 if (err) { 5971 export_rdev(rdev); 5972 return err; 5973 } 5974 } 5975 5976 return 0; 5977 } 5978 5979 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 5980 { 5981 char b[BDEVNAME_SIZE]; 5982 struct md_rdev *rdev; 5983 5984 rdev = find_rdev(mddev, dev); 5985 if (!rdev) 5986 return -ENXIO; 5987 5988 if (mddev_is_clustered(mddev)) 5989 md_cluster_ops->metadata_update_start(mddev); 5990 5991 clear_bit(Blocked, &rdev->flags); 5992 remove_and_add_spares(mddev, rdev); 5993 5994 if (rdev->raid_disk >= 0) 5995 goto busy; 5996 5997 if (mddev_is_clustered(mddev)) 5998 md_cluster_ops->remove_disk(mddev, rdev); 5999 6000 md_kick_rdev_from_array(rdev); 6001 md_update_sb(mddev, 1); 6002 md_new_event(mddev); 6003 6004 if (mddev_is_clustered(mddev)) 6005 md_cluster_ops->metadata_update_finish(mddev); 6006 6007 return 0; 6008 busy: 6009 if (mddev_is_clustered(mddev)) 6010 md_cluster_ops->metadata_update_cancel(mddev); 6011 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 6012 bdevname(rdev->bdev,b), mdname(mddev)); 6013 return -EBUSY; 6014 } 6015 6016 static int 
hot_add_disk(struct mddev *mddev, dev_t dev) 6017 { 6018 char b[BDEVNAME_SIZE]; 6019 int err; 6020 struct md_rdev *rdev; 6021 6022 if (!mddev->pers) 6023 return -ENODEV; 6024 6025 if (mddev->major_version != 0) { 6026 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 6027 " version-0 superblocks.\n", 6028 mdname(mddev)); 6029 return -EINVAL; 6030 } 6031 if (!mddev->pers->hot_add_disk) { 6032 printk(KERN_WARNING 6033 "%s: personality does not support diskops!\n", 6034 mdname(mddev)); 6035 return -EINVAL; 6036 } 6037 6038 rdev = md_import_device(dev, -1, 0); 6039 if (IS_ERR(rdev)) { 6040 printk(KERN_WARNING 6041 "md: error, md_import_device() returned %ld\n", 6042 PTR_ERR(rdev)); 6043 return -EINVAL; 6044 } 6045 6046 if (mddev->persistent) 6047 rdev->sb_start = calc_dev_sboffset(rdev); 6048 else 6049 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6050 6051 rdev->sectors = rdev->sb_start; 6052 6053 if (test_bit(Faulty, &rdev->flags)) { 6054 printk(KERN_WARNING 6055 "md: can not hot-add faulty %s disk to %s!\n", 6056 bdevname(rdev->bdev,b), mdname(mddev)); 6057 err = -EINVAL; 6058 goto abort_export; 6059 } 6060 6061 if (mddev_is_clustered(mddev)) 6062 md_cluster_ops->metadata_update_start(mddev); 6063 clear_bit(In_sync, &rdev->flags); 6064 rdev->desc_nr = -1; 6065 rdev->saved_raid_disk = -1; 6066 err = bind_rdev_to_array(rdev, mddev); 6067 if (err) 6068 goto abort_clustered; 6069 6070 /* 6071 * The rest should better be atomic, we can have disk failures 6072 * noticed in interrupt contexts ... 6073 */ 6074 6075 rdev->raid_disk = -1; 6076 6077 md_update_sb(mddev, 1); 6078 6079 if (mddev_is_clustered(mddev)) 6080 md_cluster_ops->metadata_update_finish(mddev); 6081 /* 6082 * Kick recovery, maybe this spare has to be added to the 6083 * array immediately. 6084 */ 6085 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6086 md_wakeup_thread(mddev->thread); 6087 md_new_event(mddev); 6088 return 0; 6089 6090 abort_clustered: 6091 if (mddev_is_clustered(mddev)) 6092 md_cluster_ops->metadata_update_cancel(mddev); 6093 abort_export: 6094 export_rdev(rdev); 6095 return err; 6096 } 6097 6098 static int set_bitmap_file(struct mddev *mddev, int fd) 6099 { 6100 int err = 0; 6101 6102 if (mddev->pers) { 6103 if (!mddev->pers->quiesce || !mddev->thread) 6104 return -EBUSY; 6105 if (mddev->recovery || mddev->sync_thread) 6106 return -EBUSY; 6107 /* we should be able to change the bitmap.. 
*/ 6108 } 6109 6110 if (fd >= 0) { 6111 struct inode *inode; 6112 struct file *f; 6113 6114 if (mddev->bitmap || mddev->bitmap_info.file) 6115 return -EEXIST; /* cannot add when bitmap is present */ 6116 f = fget(fd); 6117 6118 if (f == NULL) { 6119 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 6120 mdname(mddev)); 6121 return -EBADF; 6122 } 6123 6124 inode = f->f_mapping->host; 6125 if (!S_ISREG(inode->i_mode)) { 6126 printk(KERN_ERR "%s: error: bitmap file must be a regular file\n", 6127 mdname(mddev)); 6128 err = -EBADF; 6129 } else if (!(f->f_mode & FMODE_WRITE)) { 6130 printk(KERN_ERR "%s: error: bitmap file must be opened for write\n", 6131 mdname(mddev)); 6132 err = -EBADF; 6133 } else if (atomic_read(&inode->i_writecount) != 1) { 6134 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 6135 mdname(mddev)); 6136 err = -EBUSY; 6137 } 6138 if (err) { 6139 fput(f); 6140 return err; 6141 } 6142 mddev->bitmap_info.file = f; 6143 mddev->bitmap_info.offset = 0; /* file overrides offset */ 6144 } else if (mddev->bitmap == NULL) 6145 return -ENOENT; /* cannot remove what isn't there */ 6146 err = 0; 6147 if (mddev->pers) { 6148 mddev->pers->quiesce(mddev, 1); 6149 if (fd >= 0) { 6150 struct bitmap *bitmap; 6151 6152 bitmap = bitmap_create(mddev, -1); 6153 if (!IS_ERR(bitmap)) { 6154 mddev->bitmap = bitmap; 6155 err = bitmap_load(mddev); 6156 } else 6157 err = PTR_ERR(bitmap); 6158 } 6159 if (fd < 0 || err) { 6160 bitmap_destroy(mddev); 6161 fd = -1; /* make sure to put the file */ 6162 } 6163 mddev->pers->quiesce(mddev, 0); 6164 } 6165 if (fd < 0) { 6166 struct file *f = mddev->bitmap_info.file; 6167 if (f) { 6168 spin_lock(&mddev->lock); 6169 mddev->bitmap_info.file = NULL; 6170 spin_unlock(&mddev->lock); 6171 fput(f); 6172 } 6173 } 6174 6175 return err; 6176 } 6177 6178 /* 6179 * set_array_info is used in two different ways 6180 * The original usage is when creating a new array. 6181 * In this usage, raid_disks is > 0 and it together with 6182 * level, size, not_persistent, layout, chunksize determine the 6183 * shape of the array. 6184 * This will always create an array with a type-0.90.0 superblock. 6185 * The newer usage is when assembling an array. 6186 * In this case raid_disks will be 0, and the major_version field is 6187 * used to determine which style super-blocks are to be found on the devices. 6188 * The minor and patch _version numbers are also kept in case the 6189 * super_block handler wishes to interpret them. 6190 */ 6191 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) 6192 { 6193 6194 if (info->raid_disks == 0) { 6195 /* just setting version number for superblock loading */ 6196 if (info->major_version < 0 || 6197 info->major_version >= ARRAY_SIZE(super_types) || 6198 super_types[info->major_version].name == NULL) { 6199 /* maybe try to auto-load a module? */ 6200 printk(KERN_INFO 6201 "md: superblock version %d not known\n", 6202 info->major_version); 6203 return -EINVAL; 6204 } 6205 mddev->major_version = info->major_version; 6206 mddev->minor_version = info->minor_version; 6207 mddev->patch_version = info->patch_version; 6208 mddev->persistent = !info->not_persistent; 6209 /* ensure mddev_put doesn't delete this now that there 6210 * is some minimal configuration.
6211 */ 6212 mddev->ctime = get_seconds(); 6213 return 0; 6214 } 6215 mddev->major_version = MD_MAJOR_VERSION; 6216 mddev->minor_version = MD_MINOR_VERSION; 6217 mddev->patch_version = MD_PATCHLEVEL_VERSION; 6218 mddev->ctime = get_seconds(); 6219 6220 mddev->level = info->level; 6221 mddev->clevel[0] = 0; 6222 mddev->dev_sectors = 2 * (sector_t)info->size; 6223 mddev->raid_disks = info->raid_disks; 6224 /* don't set md_minor, it is determined by which /dev/md* was 6225 * openned 6226 */ 6227 if (info->state & (1<<MD_SB_CLEAN)) 6228 mddev->recovery_cp = MaxSector; 6229 else 6230 mddev->recovery_cp = 0; 6231 mddev->persistent = ! info->not_persistent; 6232 mddev->external = 0; 6233 6234 mddev->layout = info->layout; 6235 mddev->chunk_sectors = info->chunk_size >> 9; 6236 6237 mddev->max_disks = MD_SB_DISKS; 6238 6239 if (mddev->persistent) 6240 mddev->flags = 0; 6241 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6242 6243 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 6244 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 6245 mddev->bitmap_info.offset = 0; 6246 6247 mddev->reshape_position = MaxSector; 6248 6249 /* 6250 * Generate a 128 bit UUID 6251 */ 6252 get_random_bytes(mddev->uuid, 16); 6253 6254 mddev->new_level = mddev->level; 6255 mddev->new_chunk_sectors = mddev->chunk_sectors; 6256 mddev->new_layout = mddev->layout; 6257 mddev->delta_disks = 0; 6258 mddev->reshape_backwards = 0; 6259 6260 return 0; 6261 } 6262 6263 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 6264 { 6265 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 6266 6267 if (mddev->external_size) 6268 return; 6269 6270 mddev->array_sectors = array_sectors; 6271 } 6272 EXPORT_SYMBOL(md_set_array_sectors); 6273 6274 static int update_size(struct mddev *mddev, sector_t num_sectors) 6275 { 6276 struct md_rdev *rdev; 6277 int rv; 6278 int fit = (num_sectors == 0); 6279 6280 if (mddev->pers->resize == NULL) 6281 return -EINVAL; 6282 /* The "num_sectors" is the number of sectors of each device that 6283 * is used. This can only make sense for arrays with redundancy. 6284 * linear and raid0 always use whatever space is available. We can only 6285 * consider changing this number if no resync or reconstruction is 6286 * happening, and if the new size is acceptable. It must fit before the 6287 * sb_start or, if that is <data_offset, it must fit before the size 6288 * of each device. If num_sectors is zero, we find the largest size 6289 * that fits. 
6290 */ 6291 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6292 mddev->sync_thread) 6293 return -EBUSY; 6294 if (mddev->ro) 6295 return -EROFS; 6296 6297 rdev_for_each(rdev, mddev) { 6298 sector_t avail = rdev->sectors; 6299 6300 if (fit && (num_sectors == 0 || num_sectors > avail)) 6301 num_sectors = avail; 6302 if (avail < num_sectors) 6303 return -ENOSPC; 6304 } 6305 rv = mddev->pers->resize(mddev, num_sectors); 6306 if (!rv) 6307 revalidate_disk(mddev->gendisk); 6308 return rv; 6309 } 6310 6311 static int update_raid_disks(struct mddev *mddev, int raid_disks) 6312 { 6313 int rv; 6314 struct md_rdev *rdev; 6315 /* change the number of raid disks */ 6316 if (mddev->pers->check_reshape == NULL) 6317 return -EINVAL; 6318 if (mddev->ro) 6319 return -EROFS; 6320 if (raid_disks <= 0 || 6321 (mddev->max_disks && raid_disks >= mddev->max_disks)) 6322 return -EINVAL; 6323 if (mddev->sync_thread || 6324 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6325 mddev->reshape_position != MaxSector) 6326 return -EBUSY; 6327 6328 rdev_for_each(rdev, mddev) { 6329 if (mddev->raid_disks < raid_disks && 6330 rdev->data_offset < rdev->new_data_offset) 6331 return -EINVAL; 6332 if (mddev->raid_disks > raid_disks && 6333 rdev->data_offset > rdev->new_data_offset) 6334 return -EINVAL; 6335 } 6336 6337 mddev->delta_disks = raid_disks - mddev->raid_disks; 6338 if (mddev->delta_disks < 0) 6339 mddev->reshape_backwards = 1; 6340 else if (mddev->delta_disks > 0) 6341 mddev->reshape_backwards = 0; 6342 6343 rv = mddev->pers->check_reshape(mddev); 6344 if (rv < 0) { 6345 mddev->delta_disks = 0; 6346 mddev->reshape_backwards = 0; 6347 } 6348 return rv; 6349 } 6350 6351 /* 6352 * update_array_info is used to change the configuration of an 6353 * on-line array. 6354 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 6355 * fields in the info are checked against the array. 6356 * Any differences that cannot be handled will cause an error. 6357 * Normally, only one change can be managed at a time. 6358 */ 6359 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 6360 { 6361 int rv = 0; 6362 int cnt = 0; 6363 int state = 0; 6364 6365 /* calculate expected state,ignoring low bits */ 6366 if (mddev->bitmap && mddev->bitmap_info.offset) 6367 state |= (1 << MD_SB_BITMAP_PRESENT); 6368 6369 if (mddev->major_version != info->major_version || 6370 mddev->minor_version != info->minor_version || 6371 /* mddev->patch_version != info->patch_version || */ 6372 mddev->ctime != info->ctime || 6373 mddev->level != info->level || 6374 /* mddev->layout != info->layout || */ 6375 !mddev->persistent != info->not_persistent|| 6376 mddev->chunk_sectors != info->chunk_size >> 9 || 6377 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 6378 ((state^info->state) & 0xfffffe00) 6379 ) 6380 return -EINVAL; 6381 /* Check there is only one change */ 6382 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6383 cnt++; 6384 if (mddev->raid_disks != info->raid_disks) 6385 cnt++; 6386 if (mddev->layout != info->layout) 6387 cnt++; 6388 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 6389 cnt++; 6390 if (cnt == 0) 6391 return 0; 6392 if (cnt > 1) 6393 return -EINVAL; 6394 6395 if (mddev->layout != info->layout) { 6396 /* Change layout 6397 * we don't need to do anything at the md level, the 6398 * personality will take care of it all. 
6399 */ 6400 if (mddev->pers->check_reshape == NULL) 6401 return -EINVAL; 6402 else { 6403 mddev->new_layout = info->layout; 6404 rv = mddev->pers->check_reshape(mddev); 6405 if (rv) 6406 mddev->new_layout = mddev->layout; 6407 return rv; 6408 } 6409 } 6410 if (mddev_is_clustered(mddev)) 6411 md_cluster_ops->metadata_update_start(mddev); 6412 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6413 rv = update_size(mddev, (sector_t)info->size * 2); 6414 6415 if (mddev->raid_disks != info->raid_disks) 6416 rv = update_raid_disks(mddev, info->raid_disks); 6417 6418 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 6419 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 6420 rv = -EINVAL; 6421 goto err; 6422 } 6423 if (mddev->recovery || mddev->sync_thread) { 6424 rv = -EBUSY; 6425 goto err; 6426 } 6427 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 6428 struct bitmap *bitmap; 6429 /* add the bitmap */ 6430 if (mddev->bitmap) { 6431 rv = -EEXIST; 6432 goto err; 6433 } 6434 if (mddev->bitmap_info.default_offset == 0) { 6435 rv = -EINVAL; 6436 goto err; 6437 } 6438 mddev->bitmap_info.offset = 6439 mddev->bitmap_info.default_offset; 6440 mddev->bitmap_info.space = 6441 mddev->bitmap_info.default_space; 6442 mddev->pers->quiesce(mddev, 1); 6443 bitmap = bitmap_create(mddev, -1); 6444 if (!IS_ERR(bitmap)) { 6445 mddev->bitmap = bitmap; 6446 rv = bitmap_load(mddev); 6447 } else 6448 rv = PTR_ERR(bitmap); 6449 if (rv) 6450 bitmap_destroy(mddev); 6451 mddev->pers->quiesce(mddev, 0); 6452 } else { 6453 /* remove the bitmap */ 6454 if (!mddev->bitmap) { 6455 rv = -ENOENT; 6456 goto err; 6457 } 6458 if (mddev->bitmap->storage.file) { 6459 rv = -EINVAL; 6460 goto err; 6461 } 6462 mddev->pers->quiesce(mddev, 1); 6463 bitmap_destroy(mddev); 6464 mddev->pers->quiesce(mddev, 0); 6465 mddev->bitmap_info.offset = 0; 6466 } 6467 } 6468 md_update_sb(mddev, 1); 6469 if (mddev_is_clustered(mddev)) 6470 md_cluster_ops->metadata_update_finish(mddev); 6471 return rv; 6472 err: 6473 if (mddev_is_clustered(mddev)) 6474 md_cluster_ops->metadata_update_cancel(mddev); 6475 return rv; 6476 } 6477 6478 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 6479 { 6480 struct md_rdev *rdev; 6481 int err = 0; 6482 6483 if (mddev->pers == NULL) 6484 return -ENODEV; 6485 6486 rcu_read_lock(); 6487 rdev = find_rdev_rcu(mddev, dev); 6488 if (!rdev) 6489 err = -ENODEV; 6490 else { 6491 md_error(mddev, rdev); 6492 if (!test_bit(Faulty, &rdev->flags)) 6493 err = -EBUSY; 6494 } 6495 rcu_read_unlock(); 6496 return err; 6497 } 6498 6499 /* 6500 * We have a problem here : there is no easy way to give a CHS 6501 * virtual geometry. We currently pretend that we have a 2 heads 6502 * 4 sectors (with a BIG number of cylinders...). This drives 6503 * dosfs just mad... 
;-) 6504 */ 6505 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 6506 { 6507 struct mddev *mddev = bdev->bd_disk->private_data; 6508 6509 geo->heads = 2; 6510 geo->sectors = 4; 6511 geo->cylinders = mddev->array_sectors / 8; 6512 return 0; 6513 } 6514 6515 static inline bool md_ioctl_valid(unsigned int cmd) 6516 { 6517 switch (cmd) { 6518 case ADD_NEW_DISK: 6519 case BLKROSET: 6520 case GET_ARRAY_INFO: 6521 case GET_BITMAP_FILE: 6522 case GET_DISK_INFO: 6523 case HOT_ADD_DISK: 6524 case HOT_REMOVE_DISK: 6525 case RAID_AUTORUN: 6526 case RAID_VERSION: 6527 case RESTART_ARRAY_RW: 6528 case RUN_ARRAY: 6529 case SET_ARRAY_INFO: 6530 case SET_BITMAP_FILE: 6531 case SET_DISK_FAULTY: 6532 case STOP_ARRAY: 6533 case STOP_ARRAY_RO: 6534 case CLUSTERED_DISK_NACK: 6535 return true; 6536 default: 6537 return false; 6538 } 6539 } 6540 6541 static int md_ioctl(struct block_device *bdev, fmode_t mode, 6542 unsigned int cmd, unsigned long arg) 6543 { 6544 int err = 0; 6545 void __user *argp = (void __user *)arg; 6546 struct mddev *mddev = NULL; 6547 int ro; 6548 6549 if (!md_ioctl_valid(cmd)) 6550 return -ENOTTY; 6551 6552 switch (cmd) { 6553 case RAID_VERSION: 6554 case GET_ARRAY_INFO: 6555 case GET_DISK_INFO: 6556 break; 6557 default: 6558 if (!capable(CAP_SYS_ADMIN)) 6559 return -EACCES; 6560 } 6561 6562 /* 6563 * Commands dealing with the RAID driver but not any 6564 * particular array: 6565 */ 6566 switch (cmd) { 6567 case RAID_VERSION: 6568 err = get_version(argp); 6569 goto out; 6570 6571 #ifndef MODULE 6572 case RAID_AUTORUN: 6573 err = 0; 6574 autostart_arrays(arg); 6575 goto out; 6576 #endif 6577 default:; 6578 } 6579 6580 /* 6581 * Commands creating/starting a new array: 6582 */ 6583 6584 mddev = bdev->bd_disk->private_data; 6585 6586 if (!mddev) { 6587 BUG(); 6588 goto out; 6589 } 6590 6591 /* Some actions do not requires the mutex */ 6592 switch (cmd) { 6593 case GET_ARRAY_INFO: 6594 if (!mddev->raid_disks && !mddev->external) 6595 err = -ENODEV; 6596 else 6597 err = get_array_info(mddev, argp); 6598 goto out; 6599 6600 case GET_DISK_INFO: 6601 if (!mddev->raid_disks && !mddev->external) 6602 err = -ENODEV; 6603 else 6604 err = get_disk_info(mddev, argp); 6605 goto out; 6606 6607 case SET_DISK_FAULTY: 6608 err = set_disk_faulty(mddev, new_decode_dev(arg)); 6609 goto out; 6610 6611 case GET_BITMAP_FILE: 6612 err = get_bitmap_file(mddev, argp); 6613 goto out; 6614 6615 } 6616 6617 if (cmd == ADD_NEW_DISK) 6618 /* need to ensure md_delayed_delete() has completed */ 6619 flush_workqueue(md_misc_wq); 6620 6621 if (cmd == HOT_REMOVE_DISK) 6622 /* need to ensure recovery thread has run */ 6623 wait_event_interruptible_timeout(mddev->sb_wait, 6624 !test_bit(MD_RECOVERY_NEEDED, 6625 &mddev->flags), 6626 msecs_to_jiffies(5000)); 6627 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 6628 /* Need to flush page cache, and ensure no-one else opens 6629 * and writes 6630 */ 6631 mutex_lock(&mddev->open_mutex); 6632 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 6633 mutex_unlock(&mddev->open_mutex); 6634 err = -EBUSY; 6635 goto out; 6636 } 6637 set_bit(MD_STILL_CLOSED, &mddev->flags); 6638 mutex_unlock(&mddev->open_mutex); 6639 sync_blockdev(bdev); 6640 } 6641 err = mddev_lock(mddev); 6642 if (err) { 6643 printk(KERN_INFO 6644 "md: ioctl lock interrupted, reason %d, cmd %d\n", 6645 err, cmd); 6646 goto out; 6647 } 6648 6649 if (cmd == SET_ARRAY_INFO) { 6650 mdu_array_info_t info; 6651 if (!arg) 6652 memset(&info, 0, sizeof(info)); 6653 else if (copy_from_user(&info, argp, 
sizeof(info))) { 6654 err = -EFAULT; 6655 goto unlock; 6656 } 6657 if (mddev->pers) { 6658 err = update_array_info(mddev, &info); 6659 if (err) { 6660 printk(KERN_WARNING "md: couldn't update" 6661 " array info. %d\n", err); 6662 goto unlock; 6663 } 6664 goto unlock; 6665 } 6666 if (!list_empty(&mddev->disks)) { 6667 printk(KERN_WARNING 6668 "md: array %s already has disks!\n", 6669 mdname(mddev)); 6670 err = -EBUSY; 6671 goto unlock; 6672 } 6673 if (mddev->raid_disks) { 6674 printk(KERN_WARNING 6675 "md: array %s already initialised!\n", 6676 mdname(mddev)); 6677 err = -EBUSY; 6678 goto unlock; 6679 } 6680 err = set_array_info(mddev, &info); 6681 if (err) { 6682 printk(KERN_WARNING "md: couldn't set" 6683 " array info. %d\n", err); 6684 goto unlock; 6685 } 6686 goto unlock; 6687 } 6688 6689 /* 6690 * Commands querying/configuring an existing array: 6691 */ 6692 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 6693 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 6694 if ((!mddev->raid_disks && !mddev->external) 6695 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 6696 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 6697 && cmd != GET_BITMAP_FILE) { 6698 err = -ENODEV; 6699 goto unlock; 6700 } 6701 6702 /* 6703 * Commands even a read-only array can execute: 6704 */ 6705 switch (cmd) { 6706 case RESTART_ARRAY_RW: 6707 err = restart_array(mddev); 6708 goto unlock; 6709 6710 case STOP_ARRAY: 6711 err = do_md_stop(mddev, 0, bdev); 6712 goto unlock; 6713 6714 case STOP_ARRAY_RO: 6715 err = md_set_readonly(mddev, bdev); 6716 goto unlock; 6717 6718 case HOT_REMOVE_DISK: 6719 err = hot_remove_disk(mddev, new_decode_dev(arg)); 6720 goto unlock; 6721 6722 case ADD_NEW_DISK: 6723 /* We can support ADD_NEW_DISK on read-only arrays 6724 * only if we are re-adding a preexisting device. 6725 * So require mddev->pers and MD_DISK_SYNC. 6726 */ 6727 if (mddev->pers) { 6728 mdu_disk_info_t info; 6729 if (copy_from_user(&info, argp, sizeof(info))) 6730 err = -EFAULT; 6731 else if (!(info.state & (1<<MD_DISK_SYNC))) 6732 /* Need to clear read-only for this */ 6733 break; 6734 else 6735 err = add_new_disk(mddev, &info); 6736 goto unlock; 6737 } 6738 break; 6739 6740 case BLKROSET: 6741 if (get_user(ro, (int __user *)(arg))) { 6742 err = -EFAULT; 6743 goto unlock; 6744 } 6745 err = -EINVAL; 6746 6747 /* if the bdev is going readonly the value of mddev->ro 6748 * does not matter, no writes are coming 6749 */ 6750 if (ro) 6751 goto unlock; 6752 6753 /* are we already prepared for writes? */ 6754 if (mddev->ro != 1) 6755 goto unlock; 6756 6757 /* transitioning to readauto need only happen for 6758 * arrays that call md_write_start 6759 */ 6760 if (mddev->pers) { 6761 err = restart_array(mddev); 6762 if (err == 0) { 6763 mddev->ro = 2; 6764 set_disk_ro(mddev->gendisk, 0); 6765 } 6766 } 6767 goto unlock; 6768 } 6769 6770 /* 6771 * The remaining ioctls are changing the state of the 6772 * superblock, so we do not allow them on read-only arrays. 6773 */ 6774 if (mddev->ro && mddev->pers) { 6775 if (mddev->ro == 2) { 6776 mddev->ro = 0; 6777 sysfs_notify_dirent_safe(mddev->sysfs_state); 6778 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6779 /* mddev_unlock will wake thread */ 6780 /* If a device failed while we were read-only, we 6781 * need to make sure the metadata is updated now.
6782 */ 6783 if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 6784 mddev_unlock(mddev); 6785 wait_event(mddev->sb_wait, 6786 !test_bit(MD_CHANGE_DEVS, &mddev->flags) && 6787 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6788 mddev_lock_nointr(mddev); 6789 } 6790 } else { 6791 err = -EROFS; 6792 goto unlock; 6793 } 6794 } 6795 6796 switch (cmd) { 6797 case ADD_NEW_DISK: 6798 { 6799 mdu_disk_info_t info; 6800 if (copy_from_user(&info, argp, sizeof(info))) 6801 err = -EFAULT; 6802 else 6803 err = add_new_disk(mddev, &info); 6804 goto unlock; 6805 } 6806 6807 case CLUSTERED_DISK_NACK: 6808 if (mddev_is_clustered(mddev)) 6809 md_cluster_ops->new_disk_ack(mddev, false); 6810 else 6811 err = -EINVAL; 6812 goto unlock; 6813 6814 case HOT_ADD_DISK: 6815 err = hot_add_disk(mddev, new_decode_dev(arg)); 6816 goto unlock; 6817 6818 case RUN_ARRAY: 6819 err = do_md_run(mddev); 6820 goto unlock; 6821 6822 case SET_BITMAP_FILE: 6823 err = set_bitmap_file(mddev, (int)arg); 6824 goto unlock; 6825 6826 default: 6827 err = -EINVAL; 6828 goto unlock; 6829 } 6830 6831 unlock: 6832 if (mddev->hold_active == UNTIL_IOCTL && 6833 err != -EINVAL) 6834 mddev->hold_active = 0; 6835 mddev_unlock(mddev); 6836 out: 6837 return err; 6838 } 6839 #ifdef CONFIG_COMPAT 6840 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 6841 unsigned int cmd, unsigned long arg) 6842 { 6843 switch (cmd) { 6844 case HOT_REMOVE_DISK: 6845 case HOT_ADD_DISK: 6846 case SET_DISK_FAULTY: 6847 case SET_BITMAP_FILE: 6848 /* These take in integer arg, do not convert */ 6849 break; 6850 default: 6851 arg = (unsigned long)compat_ptr(arg); 6852 break; 6853 } 6854 6855 return md_ioctl(bdev, mode, cmd, arg); 6856 } 6857 #endif /* CONFIG_COMPAT */ 6858 6859 static int md_open(struct block_device *bdev, fmode_t mode) 6860 { 6861 /* 6862 * Succeed if we can lock the mddev, which confirms that 6863 * it isn't being stopped right now. 6864 */ 6865 struct mddev *mddev = mddev_find(bdev->bd_dev); 6866 int err; 6867 6868 if (!mddev) 6869 return -ENODEV; 6870 6871 if (mddev->gendisk != bdev->bd_disk) { 6872 /* we are racing with mddev_put which is discarding this 6873 * bd_disk. 
6874 */ 6875 mddev_put(mddev); 6876 /* Wait until bdev->bd_disk is definitely gone */ 6877 flush_workqueue(md_misc_wq); 6878 /* Then retry the open from the top */ 6879 return -ERESTARTSYS; 6880 } 6881 BUG_ON(mddev != bdev->bd_disk->private_data); 6882 6883 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 6884 goto out; 6885 6886 err = 0; 6887 atomic_inc(&mddev->openers); 6888 clear_bit(MD_STILL_CLOSED, &mddev->flags); 6889 mutex_unlock(&mddev->open_mutex); 6890 6891 check_disk_change(bdev); 6892 out: 6893 return err; 6894 } 6895 6896 static void md_release(struct gendisk *disk, fmode_t mode) 6897 { 6898 struct mddev *mddev = disk->private_data; 6899 6900 BUG_ON(!mddev); 6901 atomic_dec(&mddev->openers); 6902 mddev_put(mddev); 6903 } 6904 6905 static int md_media_changed(struct gendisk *disk) 6906 { 6907 struct mddev *mddev = disk->private_data; 6908 6909 return mddev->changed; 6910 } 6911 6912 static int md_revalidate(struct gendisk *disk) 6913 { 6914 struct mddev *mddev = disk->private_data; 6915 6916 mddev->changed = 0; 6917 return 0; 6918 } 6919 static const struct block_device_operations md_fops = 6920 { 6921 .owner = THIS_MODULE, 6922 .open = md_open, 6923 .release = md_release, 6924 .ioctl = md_ioctl, 6925 #ifdef CONFIG_COMPAT 6926 .compat_ioctl = md_compat_ioctl, 6927 #endif 6928 .getgeo = md_getgeo, 6929 .media_changed = md_media_changed, 6930 .revalidate_disk= md_revalidate, 6931 }; 6932 6933 static int md_thread(void *arg) 6934 { 6935 struct md_thread *thread = arg; 6936 6937 /* 6938 * md_thread is a 'system-thread', its priority should be very 6939 * high. We avoid resource deadlocks individually in each 6940 * raid personality. (RAID5 does preallocation) We also use RR and 6941 * the very same RT priority as kswapd, thus we will never get 6942 * into a priority inversion deadlock. 6943 * 6944 * we definitely have to have equal or higher priority than 6945 * bdflush, otherwise bdflush will deadlock if there are too 6946 * many dirty RAID5 blocks. 6947 */ 6948 6949 allow_signal(SIGKILL); 6950 while (!kthread_should_stop()) { 6951 6952 /* We need to wait INTERRUPTIBLE so that 6953 * we don't add to the load-average.
6954 * That means we need to be sure no signals are 6955 * pending 6956 */ 6957 if (signal_pending(current)) 6958 flush_signals(current); 6959 6960 wait_event_interruptible_timeout 6961 (thread->wqueue, 6962 test_bit(THREAD_WAKEUP, &thread->flags) 6963 || kthread_should_stop(), 6964 thread->timeout); 6965 6966 clear_bit(THREAD_WAKEUP, &thread->flags); 6967 if (!kthread_should_stop()) 6968 thread->run(thread); 6969 } 6970 6971 return 0; 6972 } 6973 6974 void md_wakeup_thread(struct md_thread *thread) 6975 { 6976 if (thread) { 6977 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 6978 set_bit(THREAD_WAKEUP, &thread->flags); 6979 wake_up(&thread->wqueue); 6980 } 6981 } 6982 EXPORT_SYMBOL(md_wakeup_thread); 6983 6984 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 6985 struct mddev *mddev, const char *name) 6986 { 6987 struct md_thread *thread; 6988 6989 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 6990 if (!thread) 6991 return NULL; 6992 6993 init_waitqueue_head(&thread->wqueue); 6994 6995 thread->run = run; 6996 thread->mddev = mddev; 6997 thread->timeout = MAX_SCHEDULE_TIMEOUT; 6998 thread->tsk = kthread_run(md_thread, thread, 6999 "%s_%s", 7000 mdname(thread->mddev), 7001 name); 7002 if (IS_ERR(thread->tsk)) { 7003 kfree(thread); 7004 return NULL; 7005 } 7006 return thread; 7007 } 7008 EXPORT_SYMBOL(md_register_thread); 7009 7010 void md_unregister_thread(struct md_thread **threadp) 7011 { 7012 struct md_thread *thread = *threadp; 7013 if (!thread) 7014 return; 7015 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7016 /* Locking ensures that mddev_unlock does not wake_up a 7017 * non-existent thread 7018 */ 7019 spin_lock(&pers_lock); 7020 *threadp = NULL; 7021 spin_unlock(&pers_lock); 7022 7023 kthread_stop(thread->tsk); 7024 kfree(thread); 7025 } 7026 EXPORT_SYMBOL(md_unregister_thread); 7027 7028 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7029 { 7030 if (!rdev || test_bit(Faulty, &rdev->flags)) 7031 return; 7032 7033 if (!mddev->pers || !mddev->pers->error_handler) 7034 return; 7035 mddev->pers->error_handler(mddev,rdev); 7036 if (mddev->degraded) 7037 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7038 sysfs_notify_dirent_safe(rdev->sysfs_state); 7039 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7040 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7041 md_wakeup_thread(mddev->thread); 7042 if (mddev->event_work.func) 7043 queue_work(md_misc_wq, &mddev->event_work); 7044 md_new_event_inintr(mddev); 7045 } 7046 EXPORT_SYMBOL(md_error); 7047 7048 /* seq_file implementation /proc/mdstat */ 7049 7050 static void status_unused(struct seq_file *seq) 7051 { 7052 int i = 0; 7053 struct md_rdev *rdev; 7054 7055 seq_printf(seq, "unused devices: "); 7056 7057 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7058 char b[BDEVNAME_SIZE]; 7059 i++; 7060 seq_printf(seq, "%s ", 7061 bdevname(rdev->bdev,b)); 7062 } 7063 if (!i) 7064 seq_printf(seq, "<none>"); 7065 7066 seq_printf(seq, "\n"); 7067 } 7068 7069 static void status_resync(struct seq_file *seq, struct mddev *mddev) 7070 { 7071 sector_t max_sectors, resync, res; 7072 unsigned long dt, db; 7073 sector_t rt; 7074 int scale; 7075 unsigned int per_milli; 7076 7077 if (mddev->curr_resync <= 3) 7078 resync = 0; 7079 else 7080 resync = mddev->curr_resync 7081 - atomic_read(&mddev->recovery_active); 7082 7083 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 7084 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7085 max_sectors = mddev->resync_max_sectors; 
7086 else 7087 max_sectors = mddev->dev_sectors; 7088 7089 WARN_ON(max_sectors == 0); 7090 /* Pick 'scale' such that (resync>>scale)*1000 will fit 7091 * in a sector_t, and (max_sectors>>scale) will fit in a 7092 * u32, as those are the requirements for sector_div. 7093 * Thus 'scale' must be at least 10 7094 */ 7095 scale = 10; 7096 if (sizeof(sector_t) > sizeof(unsigned long)) { 7097 while ( max_sectors/2 > (1ULL<<(scale+32))) 7098 scale++; 7099 } 7100 res = (resync>>scale)*1000; 7101 sector_div(res, (u32)((max_sectors>>scale)+1)); 7102 7103 per_milli = res; 7104 { 7105 int i, x = per_milli/50, y = 20-x; 7106 seq_printf(seq, "["); 7107 for (i = 0; i < x; i++) 7108 seq_printf(seq, "="); 7109 seq_printf(seq, ">"); 7110 for (i = 0; i < y; i++) 7111 seq_printf(seq, "."); 7112 seq_printf(seq, "] "); 7113 } 7114 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 7115 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 7116 "reshape" : 7117 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 7118 "check" : 7119 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 7120 "resync" : "recovery"))), 7121 per_milli/10, per_milli % 10, 7122 (unsigned long long) resync/2, 7123 (unsigned long long) max_sectors/2); 7124 7125 /* 7126 * dt: time from mark until now 7127 * db: blocks written from mark until now 7128 * rt: remaining time 7129 * 7130 * rt is a sector_t, so could be 32bit or 64bit. 7131 * So we divide before multiply in case it is 32bit and close 7132 * to the limit. 7133 * We scale the divisor (db) by 32 to avoid losing precision 7134 * near the end of resync when the number of remaining sectors 7135 * is close to 'db'. 7136 * We then divide rt by 32 after multiplying by db to compensate. 7137 * The '+1' avoids division by zero if db is very small. 7138 */ 7139 dt = ((jiffies - mddev->resync_mark) / HZ); 7140 if (!dt) dt++; 7141 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 7142 - mddev->resync_mark_cnt; 7143 7144 rt = max_sectors - resync; /* number of remaining sectors */ 7145 sector_div(rt, db/32+1); 7146 rt *= dt; 7147 rt >>= 5; 7148 7149 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 7150 ((unsigned long)rt % 60)/6); 7151 7152 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 7153 } 7154 7155 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 7156 { 7157 struct list_head *tmp; 7158 loff_t l = *pos; 7159 struct mddev *mddev; 7160 7161 if (l >= 0x10000) 7162 return NULL; 7163 if (!l--) 7164 /* header */ 7165 return (void*)1; 7166 7167 spin_lock(&all_mddevs_lock); 7168 list_for_each(tmp,&all_mddevs) 7169 if (!l--) { 7170 mddev = list_entry(tmp, struct mddev, all_mddevs); 7171 mddev_get(mddev); 7172 spin_unlock(&all_mddevs_lock); 7173 return mddev; 7174 } 7175 spin_unlock(&all_mddevs_lock); 7176 if (!l--) 7177 return (void*)2;/* tail */ 7178 return NULL; 7179 } 7180 7181 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 7182 { 7183 struct list_head *tmp; 7184 struct mddev *next_mddev, *mddev = v; 7185 7186 ++*pos; 7187 if (v == (void*)2) 7188 return NULL; 7189 7190 spin_lock(&all_mddevs_lock); 7191 if (v == (void*)1) 7192 tmp = all_mddevs.next; 7193 else 7194 tmp = mddev->all_mddevs.next; 7195 if (tmp != &all_mddevs) 7196 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 7197 else { 7198 next_mddev = (void*)2; 7199 *pos = 0x10000; 7200 } 7201 spin_unlock(&all_mddevs_lock); 7202 7203 if (v != (void*)1) 7204 mddev_put(mddev); 7205 return next_mddev; 7206 7207 } 7208 7209 static void md_seq_stop(struct seq_file *seq, void *v) 7210 { 7211 
struct mddev *mddev = v; 7212 7213 if (mddev && v != (void*)1 && v != (void*)2) 7214 mddev_put(mddev); 7215 } 7216 7217 static int md_seq_show(struct seq_file *seq, void *v) 7218 { 7219 struct mddev *mddev = v; 7220 sector_t sectors; 7221 struct md_rdev *rdev; 7222 7223 if (v == (void*)1) { 7224 struct md_personality *pers; 7225 seq_printf(seq, "Personalities : "); 7226 spin_lock(&pers_lock); 7227 list_for_each_entry(pers, &pers_list, list) 7228 seq_printf(seq, "[%s] ", pers->name); 7229 7230 spin_unlock(&pers_lock); 7231 seq_printf(seq, "\n"); 7232 seq->poll_event = atomic_read(&md_event_count); 7233 return 0; 7234 } 7235 if (v == (void*)2) { 7236 status_unused(seq); 7237 return 0; 7238 } 7239 7240 spin_lock(&mddev->lock); 7241 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 7242 seq_printf(seq, "%s : %sactive", mdname(mddev), 7243 mddev->pers ? "" : "in"); 7244 if (mddev->pers) { 7245 if (mddev->ro==1) 7246 seq_printf(seq, " (read-only)"); 7247 if (mddev->ro==2) 7248 seq_printf(seq, " (auto-read-only)"); 7249 seq_printf(seq, " %s", mddev->pers->name); 7250 } 7251 7252 sectors = 0; 7253 rcu_read_lock(); 7254 rdev_for_each_rcu(rdev, mddev) { 7255 char b[BDEVNAME_SIZE]; 7256 seq_printf(seq, " %s[%d]", 7257 bdevname(rdev->bdev,b), rdev->desc_nr); 7258 if (test_bit(WriteMostly, &rdev->flags)) 7259 seq_printf(seq, "(W)"); 7260 if (test_bit(Faulty, &rdev->flags)) { 7261 seq_printf(seq, "(F)"); 7262 continue; 7263 } 7264 if (rdev->raid_disk < 0) 7265 seq_printf(seq, "(S)"); /* spare */ 7266 if (test_bit(Replacement, &rdev->flags)) 7267 seq_printf(seq, "(R)"); 7268 sectors += rdev->sectors; 7269 } 7270 rcu_read_unlock(); 7271 7272 if (!list_empty(&mddev->disks)) { 7273 if (mddev->pers) 7274 seq_printf(seq, "\n %llu blocks", 7275 (unsigned long long) 7276 mddev->array_sectors / 2); 7277 else 7278 seq_printf(seq, "\n %llu blocks", 7279 (unsigned long long)sectors / 2); 7280 } 7281 if (mddev->persistent) { 7282 if (mddev->major_version != 0 || 7283 mddev->minor_version != 90) { 7284 seq_printf(seq," super %d.%d", 7285 mddev->major_version, 7286 mddev->minor_version); 7287 } 7288 } else if (mddev->external) 7289 seq_printf(seq, " super external:%s", 7290 mddev->metadata_type); 7291 else 7292 seq_printf(seq, " super non-persistent"); 7293 7294 if (mddev->pers) { 7295 mddev->pers->status(seq, mddev); 7296 seq_printf(seq, "\n "); 7297 if (mddev->pers->sync_request) { 7298 if (mddev->curr_resync > 2) { 7299 status_resync(seq, mddev); 7300 seq_printf(seq, "\n "); 7301 } else if (mddev->curr_resync >= 1) 7302 seq_printf(seq, "\tresync=DELAYED\n "); 7303 else if (mddev->recovery_cp < MaxSector) 7304 seq_printf(seq, "\tresync=PENDING\n "); 7305 } 7306 } else 7307 seq_printf(seq, "\n "); 7308 7309 bitmap_status(seq, mddev->bitmap); 7310 7311 seq_printf(seq, "\n"); 7312 } 7313 spin_unlock(&mddev->lock); 7314 7315 return 0; 7316 } 7317 7318 static const struct seq_operations md_seq_ops = { 7319 .start = md_seq_start, 7320 .next = md_seq_next, 7321 .stop = md_seq_stop, 7322 .show = md_seq_show, 7323 }; 7324 7325 static int md_seq_open(struct inode *inode, struct file *file) 7326 { 7327 struct seq_file *seq; 7328 int error; 7329 7330 error = seq_open(file, &md_seq_ops); 7331 if (error) 7332 return error; 7333 7334 seq = file->private_data; 7335 seq->poll_event = atomic_read(&md_event_count); 7336 return error; 7337 } 7338 7339 static int md_unloading; 7340 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 7341 { 7342 struct seq_file *seq = filp->private_data; 7343 int 
mask; 7344 7345 if (md_unloading) 7346 return POLLIN|POLLRDNORM|POLLERR|POLLPRI; 7347 poll_wait(filp, &md_event_waiters, wait); 7348 7349 /* always allow read */ 7350 mask = POLLIN | POLLRDNORM; 7351 7352 if (seq->poll_event != atomic_read(&md_event_count)) 7353 mask |= POLLERR | POLLPRI; 7354 return mask; 7355 } 7356 7357 static const struct file_operations md_seq_fops = { 7358 .owner = THIS_MODULE, 7359 .open = md_seq_open, 7360 .read = seq_read, 7361 .llseek = seq_lseek, 7362 .release = seq_release_private, 7363 .poll = mdstat_poll, 7364 }; 7365 7366 int register_md_personality(struct md_personality *p) 7367 { 7368 printk(KERN_INFO "md: %s personality registered for level %d\n", 7369 p->name, p->level); 7370 spin_lock(&pers_lock); 7371 list_add_tail(&p->list, &pers_list); 7372 spin_unlock(&pers_lock); 7373 return 0; 7374 } 7375 EXPORT_SYMBOL(register_md_personality); 7376 7377 int unregister_md_personality(struct md_personality *p) 7378 { 7379 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 7380 spin_lock(&pers_lock); 7381 list_del_init(&p->list); 7382 spin_unlock(&pers_lock); 7383 return 0; 7384 } 7385 EXPORT_SYMBOL(unregister_md_personality); 7386 7387 int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module) 7388 { 7389 if (md_cluster_ops != NULL) 7390 return -EALREADY; 7391 spin_lock(&pers_lock); 7392 md_cluster_ops = ops; 7393 md_cluster_mod = module; 7394 spin_unlock(&pers_lock); 7395 return 0; 7396 } 7397 EXPORT_SYMBOL(register_md_cluster_operations); 7398 7399 int unregister_md_cluster_operations(void) 7400 { 7401 spin_lock(&pers_lock); 7402 md_cluster_ops = NULL; 7403 spin_unlock(&pers_lock); 7404 return 0; 7405 } 7406 EXPORT_SYMBOL(unregister_md_cluster_operations); 7407 7408 int md_setup_cluster(struct mddev *mddev, int nodes) 7409 { 7410 int err; 7411 7412 err = request_module("md-cluster"); 7413 if (err) { 7414 pr_err("md-cluster module not found.\n"); 7415 return err; 7416 } 7417 7418 spin_lock(&pers_lock); 7419 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 7420 spin_unlock(&pers_lock); 7421 return -ENOENT; 7422 } 7423 spin_unlock(&pers_lock); 7424 7425 return md_cluster_ops->join(mddev, nodes); 7426 } 7427 7428 void md_cluster_stop(struct mddev *mddev) 7429 { 7430 if (!md_cluster_ops) 7431 return; 7432 md_cluster_ops->leave(mddev); 7433 module_put(md_cluster_mod); 7434 } 7435 7436 static int is_mddev_idle(struct mddev *mddev, int init) 7437 { 7438 struct md_rdev *rdev; 7439 int idle; 7440 int curr_events; 7441 7442 idle = 1; 7443 rcu_read_lock(); 7444 rdev_for_each_rcu(rdev, mddev) { 7445 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 7446 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 7447 (int)part_stat_read(&disk->part0, sectors[1]) - 7448 atomic_read(&disk->sync_io); 7449 /* sync IO will cause sync_io to increase before the disk_stats 7450 * as sync_io is counted when a request starts, and 7451 * disk_stats is counted when it completes. 7452 * So resync activity will cause curr_events to be smaller than 7453 * when there was no such activity. 7454 * non-sync IO will cause disk_stat to increase without 7455 * increasing sync_io so curr_events will (eventually) 7456 * be larger than it was before. Once it becomes 7457 * substantially larger, the test below will cause 7458 * the array to appear non-idle, and resync will slow 7459 * down. 
7460 * If there is a lot of outstanding resync activity when 7461 * we set last_event to curr_events, then all that activity 7462 * completing might cause the array to appear non-idle 7463 * and resync will be slowed down even though there might 7464 * not have been non-resync activity. This will only 7465 * happen once though. 'last_events' will soon reflect 7466 * the state where there is little or no outstanding 7467 * resync requests, and further resync activity will 7468 * always make curr_events less than last_events. 7469 * 7470 */ 7471 if (init || curr_events - rdev->last_events > 64) { 7472 rdev->last_events = curr_events; 7473 idle = 0; 7474 } 7475 } 7476 rcu_read_unlock(); 7477 return idle; 7478 } 7479 7480 void md_done_sync(struct mddev *mddev, int blocks, int ok) 7481 { 7482 /* another "blocks" (512byte) blocks have been synced */ 7483 atomic_sub(blocks, &mddev->recovery_active); 7484 wake_up(&mddev->recovery_wait); 7485 if (!ok) { 7486 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7487 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 7488 md_wakeup_thread(mddev->thread); 7489 // stop recovery, signal do_sync .... 7490 } 7491 } 7492 EXPORT_SYMBOL(md_done_sync); 7493 7494 /* md_write_start(mddev, bi) 7495 * If we need to update some array metadata (e.g. 'active' flag 7496 * in superblock) before writing, schedule a superblock update 7497 * and wait for it to complete. 7498 */ 7499 void md_write_start(struct mddev *mddev, struct bio *bi) 7500 { 7501 int did_change = 0; 7502 if (bio_data_dir(bi) != WRITE) 7503 return; 7504 7505 BUG_ON(mddev->ro == 1); 7506 if (mddev->ro == 2) { 7507 /* need to switch to read/write */ 7508 mddev->ro = 0; 7509 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7510 md_wakeup_thread(mddev->thread); 7511 md_wakeup_thread(mddev->sync_thread); 7512 did_change = 1; 7513 } 7514 atomic_inc(&mddev->writes_pending); 7515 if (mddev->safemode == 1) 7516 mddev->safemode = 0; 7517 if (mddev->in_sync) { 7518 spin_lock(&mddev->lock); 7519 if (mddev->in_sync) { 7520 mddev->in_sync = 0; 7521 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7522 set_bit(MD_CHANGE_PENDING, &mddev->flags); 7523 md_wakeup_thread(mddev->thread); 7524 did_change = 1; 7525 } 7526 spin_unlock(&mddev->lock); 7527 } 7528 if (did_change) 7529 sysfs_notify_dirent_safe(mddev->sysfs_state); 7530 wait_event(mddev->sb_wait, 7531 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 7532 } 7533 EXPORT_SYMBOL(md_write_start); 7534 7535 void md_write_end(struct mddev *mddev) 7536 { 7537 if (atomic_dec_and_test(&mddev->writes_pending)) { 7538 if (mddev->safemode == 2) 7539 md_wakeup_thread(mddev->thread); 7540 else if (mddev->safemode_delay) 7541 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 7542 } 7543 } 7544 EXPORT_SYMBOL(md_write_end); 7545 7546 /* md_allow_write(mddev) 7547 * Calling this ensures that the array is marked 'active' so that writes 7548 * may proceed without blocking. It is important to call this before 7549 * attempting a GFP_KERNEL allocation while holding the mddev lock. 7550 * Must be called with mddev_lock held. 7551 * 7552 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock 7553 * is dropped, so return -EAGAIN after notifying userspace. 
7554 */ 7555 int md_allow_write(struct mddev *mddev) 7556 { 7557 if (!mddev->pers) 7558 return 0; 7559 if (mddev->ro) 7560 return 0; 7561 if (!mddev->pers->sync_request) 7562 return 0; 7563 7564 spin_lock(&mddev->lock); 7565 if (mddev->in_sync) { 7566 mddev->in_sync = 0; 7567 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7568 set_bit(MD_CHANGE_PENDING, &mddev->flags); 7569 if (mddev->safemode_delay && 7570 mddev->safemode == 0) 7571 mddev->safemode = 1; 7572 spin_unlock(&mddev->lock); 7573 if (mddev_is_clustered(mddev)) 7574 md_cluster_ops->metadata_update_start(mddev); 7575 md_update_sb(mddev, 0); 7576 if (mddev_is_clustered(mddev)) 7577 md_cluster_ops->metadata_update_finish(mddev); 7578 sysfs_notify_dirent_safe(mddev->sysfs_state); 7579 } else 7580 spin_unlock(&mddev->lock); 7581 7582 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 7583 return -EAGAIN; 7584 else 7585 return 0; 7586 } 7587 EXPORT_SYMBOL_GPL(md_allow_write); 7588 7589 #define SYNC_MARKS 10 7590 #define SYNC_MARK_STEP (3*HZ) 7591 #define UPDATE_FREQUENCY (5*60*HZ) 7592 void md_do_sync(struct md_thread *thread) 7593 { 7594 struct mddev *mddev = thread->mddev; 7595 struct mddev *mddev2; 7596 unsigned int currspeed = 0, 7597 window; 7598 sector_t max_sectors,j, io_sectors, recovery_done; 7599 unsigned long mark[SYNC_MARKS]; 7600 unsigned long update_time; 7601 sector_t mark_cnt[SYNC_MARKS]; 7602 int last_mark,m; 7603 struct list_head *tmp; 7604 sector_t last_check; 7605 int skipped = 0; 7606 struct md_rdev *rdev; 7607 char *desc, *action = NULL; 7608 struct blk_plug plug; 7609 7610 /* just in case thread restarts... */ 7611 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7612 return; 7613 if (mddev->ro) {/* never try to sync a read-only array */ 7614 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7615 return; 7616 } 7617 7618 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7619 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 7620 desc = "data-check"; 7621 action = "check"; 7622 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7623 desc = "requested-resync"; 7624 action = "repair"; 7625 } else 7626 desc = "resync"; 7627 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7628 desc = "reshape"; 7629 else 7630 desc = "recovery"; 7631 7632 mddev->last_sync_action = action ?: desc; 7633 7634 /* we overload curr_resync somewhat here. 7635 * 0 == not engaged in resync at all 7636 * 2 == checking that there is no conflict with another sync 7637 * 1 == like 2, but have yielded to allow conflicting resync to 7638 * commence 7639 * other == active in resync - this many blocks 7640 * 7641 * Before starting a resync we must have set curr_resync to 7642 * 2, and then checked that every "conflicting" array has curr_resync 7643 * less than ours. When we find one that is the same or higher 7644 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 7645 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 7646 * This will mean we have to start checking from the beginning again.
7647 * 7648 */ 7649 7650 do { 7651 mddev->curr_resync = 2; 7652 7653 try_again: 7654 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7655 goto skip; 7656 for_each_mddev(mddev2, tmp) { 7657 if (mddev2 == mddev) 7658 continue; 7659 if (!mddev->parallel_resync 7660 && mddev2->curr_resync 7661 && match_mddev_units(mddev, mddev2)) { 7662 DEFINE_WAIT(wq); 7663 if (mddev < mddev2 && mddev->curr_resync == 2) { 7664 /* arbitrarily yield */ 7665 mddev->curr_resync = 1; 7666 wake_up(&resync_wait); 7667 } 7668 if (mddev > mddev2 && mddev->curr_resync == 1) 7669 /* no need to wait here, we can wait the next 7670 * time 'round when curr_resync == 2 7671 */ 7672 continue; 7673 /* We need to wait 'interruptible' so as not to 7674 * contribute to the load average, and not to 7675 * be caught by 'softlockup' 7676 */ 7677 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7678 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7679 mddev2->curr_resync >= mddev->curr_resync) { 7680 printk(KERN_INFO "md: delaying %s of %s" 7681 " until %s has finished (they" 7682 " share one or more physical units)\n", 7683 desc, mdname(mddev), mdname(mddev2)); 7684 mddev_put(mddev2); 7685 if (signal_pending(current)) 7686 flush_signals(current); 7687 schedule(); 7688 finish_wait(&resync_wait, &wq); 7689 goto try_again; 7690 } 7691 finish_wait(&resync_wait, &wq); 7692 } 7693 } 7694 } while (mddev->curr_resync < 2); 7695 7696 j = 0; 7697 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7698 /* resync follows the size requested by the personality, 7699 * which defaults to physical size, but can be virtual size 7700 */ 7701 max_sectors = mddev->resync_max_sectors; 7702 atomic64_set(&mddev->resync_mismatches, 0); 7703 /* we don't use the checkpoint if there's a bitmap */ 7704 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7705 j = mddev->resync_min; 7706 else if (!mddev->bitmap) 7707 j = mddev->recovery_cp; 7708 7709 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7710 max_sectors = mddev->resync_max_sectors; 7711 else { 7712 /* recovery follows the physical size of devices */ 7713 max_sectors = mddev->dev_sectors; 7714 j = MaxSector; 7715 rcu_read_lock(); 7716 rdev_for_each_rcu(rdev, mddev) 7717 if (rdev->raid_disk >= 0 && 7718 !test_bit(Faulty, &rdev->flags) && 7719 !test_bit(In_sync, &rdev->flags) && 7720 rdev->recovery_offset < j) 7721 j = rdev->recovery_offset; 7722 rcu_read_unlock(); 7723 7724 /* If there is a bitmap, we need to make sure all 7725 * writes that started before we added a spare 7726 * complete before we start doing a recovery. 7727 * Otherwise the write might complete and (via 7728 * bitmap_endwrite) set a bit in the bitmap after the 7729 * recovery has checked that bit and skipped that 7730 * region. 
7731 */ 7732 if (mddev->bitmap) { 7733 mddev->pers->quiesce(mddev, 1); 7734 mddev->pers->quiesce(mddev, 0); 7735 } 7736 } 7737 7738 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7739 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 7740 " %d KB/sec/disk.\n", speed_min(mddev)); 7741 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 7742 "(but not more than %d KB/sec) for %s.\n", 7743 speed_max(mddev), desc); 7744 7745 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 7746 7747 io_sectors = 0; 7748 for (m = 0; m < SYNC_MARKS; m++) { 7749 mark[m] = jiffies; 7750 mark_cnt[m] = io_sectors; 7751 } 7752 last_mark = 0; 7753 mddev->resync_mark = mark[last_mark]; 7754 mddev->resync_mark_cnt = mark_cnt[last_mark]; 7755 7756 /* 7757 * Tune reconstruction: 7758 */ 7759 window = 32*(PAGE_SIZE/512); 7760 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 7761 window/2, (unsigned long long)max_sectors/2); 7762 7763 atomic_set(&mddev->recovery_active, 0); 7764 last_check = 0; 7765 7766 if (j>2) { 7767 printk(KERN_INFO 7768 "md: resuming %s of %s from checkpoint.\n", 7769 desc, mdname(mddev)); 7770 mddev->curr_resync = j; 7771 } else 7772 mddev->curr_resync = 3; /* no longer delayed */ 7773 mddev->curr_resync_completed = j; 7774 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7775 md_new_event(mddev); 7776 update_time = jiffies; 7777 7778 if (mddev_is_clustered(mddev)) 7779 md_cluster_ops->resync_start(mddev, j, max_sectors); 7780 7781 blk_start_plug(&plug); 7782 while (j < max_sectors) { 7783 sector_t sectors; 7784 7785 skipped = 0; 7786 7787 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7788 ((mddev->curr_resync > mddev->curr_resync_completed && 7789 (mddev->curr_resync - mddev->curr_resync_completed) 7790 > (max_sectors >> 4)) || 7791 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 7792 (j - mddev->curr_resync_completed)*2 7793 >= mddev->resync_max - mddev->curr_resync_completed 7794 )) { 7795 /* time to update curr_resync_completed */ 7796 wait_event(mddev->recovery_wait, 7797 atomic_read(&mddev->recovery_active) == 0); 7798 mddev->curr_resync_completed = j; 7799 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 7800 j > mddev->recovery_cp) 7801 mddev->recovery_cp = j; 7802 update_time = jiffies; 7803 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7804 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7805 } 7806 7807 while (j >= mddev->resync_max && 7808 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7809 /* As this condition is controlled by user-space, 7810 * we can block indefinitely, so use '_interruptible' 7811 * to avoid triggering warnings. 
7812 */ 7813 flush_signals(current); /* just in case */ 7814 wait_event_interruptible(mddev->recovery_wait, 7815 mddev->resync_max > j 7816 || test_bit(MD_RECOVERY_INTR, 7817 &mddev->recovery)); 7818 } 7819 7820 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7821 break; 7822 7823 sectors = mddev->pers->sync_request(mddev, j, &skipped); 7824 if (sectors == 0) { 7825 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7826 break; 7827 } 7828 7829 if (!skipped) { /* actual IO requested */ 7830 io_sectors += sectors; 7831 atomic_add(sectors, &mddev->recovery_active); 7832 } 7833 7834 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7835 break; 7836 7837 j += sectors; 7838 if (j > 2) 7839 mddev->curr_resync = j; 7840 if (mddev_is_clustered(mddev)) 7841 md_cluster_ops->resync_info_update(mddev, j, max_sectors); 7842 mddev->curr_mark_cnt = io_sectors; 7843 if (last_check == 0) 7844 /* this is the earliest that rebuild will be 7845 * visible in /proc/mdstat 7846 */ 7847 md_new_event(mddev); 7848 7849 if (last_check + window > io_sectors || j == max_sectors) 7850 continue; 7851 7852 last_check = io_sectors; 7853 repeat: 7854 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 7855 /* step marks */ 7856 int next = (last_mark+1) % SYNC_MARKS; 7857 7858 mddev->resync_mark = mark[next]; 7859 mddev->resync_mark_cnt = mark_cnt[next]; 7860 mark[next] = jiffies; 7861 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 7862 last_mark = next; 7863 } 7864 7865 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7866 break; 7867 7868 /* 7869 * this loop exits only if either when we are slower than 7870 * the 'hard' speed limit, or the system was IO-idle for 7871 * a jiffy. 7872 * the system might be non-idle CPU-wise, but we only care 7873 * about not overloading the IO subsystem. (things like an 7874 * e2fsck being done on the RAID array should execute fast) 7875 */ 7876 cond_resched(); 7877 7878 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 7879 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 7880 /((jiffies-mddev->resync_mark)/HZ +1) +1; 7881 7882 if (currspeed > speed_min(mddev)) { 7883 if (currspeed > speed_max(mddev)) { 7884 msleep(500); 7885 goto repeat; 7886 } 7887 if (!is_mddev_idle(mddev, 0)) { 7888 /* 7889 * Give other IO more of a chance. 7890 * The faster the devices, the less we wait. 7891 */ 7892 wait_event(mddev->recovery_wait, 7893 !atomic_read(&mddev->recovery_active)); 7894 } 7895 } 7896 } 7897 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, 7898 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 7899 ? 
"interrupted" : "done"); 7900 /* 7901 * this also signals 'finished resyncing' to md_stop 7902 */ 7903 blk_finish_plug(&plug); 7904 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 7905 7906 /* tell personality that we are finished */ 7907 mddev->pers->sync_request(mddev, max_sectors, &skipped); 7908 7909 if (mddev_is_clustered(mddev)) 7910 md_cluster_ops->resync_finish(mddev); 7911 7912 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 7913 mddev->curr_resync > 2) { 7914 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7915 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7916 if (mddev->curr_resync >= mddev->recovery_cp) { 7917 printk(KERN_INFO 7918 "md: checkpointing %s of %s.\n", 7919 desc, mdname(mddev)); 7920 if (test_bit(MD_RECOVERY_ERROR, 7921 &mddev->recovery)) 7922 mddev->recovery_cp = 7923 mddev->curr_resync_completed; 7924 else 7925 mddev->recovery_cp = 7926 mddev->curr_resync; 7927 } 7928 } else 7929 mddev->recovery_cp = MaxSector; 7930 } else { 7931 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7932 mddev->curr_resync = MaxSector; 7933 rcu_read_lock(); 7934 rdev_for_each_rcu(rdev, mddev) 7935 if (rdev->raid_disk >= 0 && 7936 mddev->delta_disks >= 0 && 7937 !test_bit(Faulty, &rdev->flags) && 7938 !test_bit(In_sync, &rdev->flags) && 7939 rdev->recovery_offset < mddev->curr_resync) 7940 rdev->recovery_offset = mddev->curr_resync; 7941 rcu_read_unlock(); 7942 } 7943 } 7944 skip: 7945 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7946 7947 spin_lock(&mddev->lock); 7948 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7949 /* We completed so min/max setting can be forgotten if used. */ 7950 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7951 mddev->resync_min = 0; 7952 mddev->resync_max = MaxSector; 7953 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7954 mddev->resync_min = mddev->curr_resync_completed; 7955 mddev->curr_resync = 0; 7956 spin_unlock(&mddev->lock); 7957 7958 wake_up(&resync_wait); 7959 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7960 md_wakeup_thread(mddev->thread); 7961 return; 7962 } 7963 EXPORT_SYMBOL_GPL(md_do_sync); 7964 7965 static int remove_and_add_spares(struct mddev *mddev, 7966 struct md_rdev *this) 7967 { 7968 struct md_rdev *rdev; 7969 int spares = 0; 7970 int removed = 0; 7971 7972 rdev_for_each(rdev, mddev) 7973 if ((this == NULL || rdev == this) && 7974 rdev->raid_disk >= 0 && 7975 !test_bit(Blocked, &rdev->flags) && 7976 (test_bit(Faulty, &rdev->flags) || 7977 ! test_bit(In_sync, &rdev->flags)) && 7978 atomic_read(&rdev->nr_pending)==0) { 7979 if (mddev->pers->hot_remove_disk( 7980 mddev, rdev) == 0) { 7981 sysfs_unlink_rdev(mddev, rdev); 7982 rdev->raid_disk = -1; 7983 removed++; 7984 } 7985 } 7986 if (removed && mddev->kobj.sd) 7987 sysfs_notify(&mddev->kobj, NULL, "degraded"); 7988 7989 if (this) 7990 goto no_add; 7991 7992 rdev_for_each(rdev, mddev) { 7993 if (rdev->raid_disk >= 0 && 7994 !test_bit(In_sync, &rdev->flags) && 7995 !test_bit(Faulty, &rdev->flags)) 7996 spares++; 7997 if (rdev->raid_disk >= 0) 7998 continue; 7999 if (test_bit(Faulty, &rdev->flags)) 8000 continue; 8001 if (mddev->ro && 8002 ! 
(rdev->saved_raid_disk >= 0 && 8003 !test_bit(Bitmap_sync, &rdev->flags))) 8004 continue; 8005 8006 if (rdev->saved_raid_disk < 0) 8007 rdev->recovery_offset = 0; 8008 if (mddev->pers-> 8009 hot_add_disk(mddev, rdev) == 0) { 8010 if (sysfs_link_rdev(mddev, rdev)) 8011 /* failure here is OK */; 8012 spares++; 8013 md_new_event(mddev); 8014 set_bit(MD_CHANGE_DEVS, &mddev->flags); 8015 } 8016 } 8017 no_add: 8018 if (removed) 8019 set_bit(MD_CHANGE_DEVS, &mddev->flags); 8020 return spares; 8021 } 8022 8023 static void md_start_sync(struct work_struct *ws) 8024 { 8025 struct mddev *mddev = container_of(ws, struct mddev, del_work); 8026 8027 mddev->sync_thread = md_register_thread(md_do_sync, 8028 mddev, 8029 "resync"); 8030 if (!mddev->sync_thread) { 8031 printk(KERN_ERR "%s: could not start resync" 8032 " thread...\n", 8033 mdname(mddev)); 8034 /* leave the spares where they are, it shouldn't hurt */ 8035 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8036 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8037 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8038 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8039 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8040 wake_up(&resync_wait); 8041 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 8042 &mddev->recovery)) 8043 if (mddev->sysfs_action) 8044 sysfs_notify_dirent_safe(mddev->sysfs_action); 8045 } else 8046 md_wakeup_thread(mddev->sync_thread); 8047 sysfs_notify_dirent_safe(mddev->sysfs_action); 8048 md_new_event(mddev); 8049 } 8050 8051 /* 8052 * This routine is regularly called by all per-raid-array threads to 8053 * deal with generic issues like resync and super-block update. 8054 * Raid personalities that don't have a thread (linear/raid0) do not 8055 * need this as they never do any recovery or update the superblock. 8056 * 8057 * It does not do any resync itself, but rather "forks" off other threads 8058 * to do that as needed. 8059 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 8060 * "->recovery" and create a thread at ->sync_thread. 8061 * When the thread finishes it sets MD_RECOVERY_DONE 8062 * and wakeups up this thread which will reap the thread and finish up. 8063 * This thread also removes any faulty devices (with nr_pending == 0). 8064 * 8065 * The overall approach is: 8066 * 1/ if the superblock needs updating, update it. 8067 * 2/ If a recovery thread is running, don't do anything else. 8068 * 3/ If recovery has finished, clean up, possibly marking spares active. 8069 * 4/ If there are any faulty devices, remove them. 8070 * 5/ If array is degraded, try to add spares devices 8071 * 6/ If array has spares or is not in-sync, start a resync thread. 8072 */ 8073 void md_check_recovery(struct mddev *mddev) 8074 { 8075 if (mddev->suspended) 8076 return; 8077 8078 if (mddev->bitmap) 8079 bitmap_daemon_work(mddev); 8080 8081 if (signal_pending(current)) { 8082 if (mddev->pers->sync_request && !mddev->external) { 8083 printk(KERN_INFO "md: %s in immediate safe mode\n", 8084 mdname(mddev)); 8085 mddev->safemode = 2; 8086 } 8087 flush_signals(current); 8088 } 8089 8090 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 8091 return; 8092 if ( ! ( 8093 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) || 8094 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 8095 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8096 (mddev->external == 0 && mddev->safemode == 1) || 8097 (mddev->safemode == 2 && ! 
atomic_read(&mddev->writes_pending) 8098 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 8099 )) 8100 return; 8101 8102 if (mddev_trylock(mddev)) { 8103 int spares = 0; 8104 8105 if (mddev->ro) { 8106 /* On a read-only array we can: 8107 * - remove failed devices 8108 * - add already-in_sync devices if the array itself 8109 * is in-sync. 8110 * As we only add devices that are already in-sync, 8111 * we can activate the spares immediately. 8112 */ 8113 remove_and_add_spares(mddev, NULL); 8114 /* There is no thread, but we need to call 8115 * ->spare_active and clear saved_raid_disk 8116 */ 8117 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8118 md_reap_sync_thread(mddev); 8119 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8120 goto unlock; 8121 } 8122 8123 if (!mddev->external) { 8124 int did_change = 0; 8125 spin_lock(&mddev->lock); 8126 if (mddev->safemode && 8127 !atomic_read(&mddev->writes_pending) && 8128 !mddev->in_sync && 8129 mddev->recovery_cp == MaxSector) { 8130 mddev->in_sync = 1; 8131 did_change = 1; 8132 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 8133 } 8134 if (mddev->safemode == 1) 8135 mddev->safemode = 0; 8136 spin_unlock(&mddev->lock); 8137 if (did_change) 8138 sysfs_notify_dirent_safe(mddev->sysfs_state); 8139 } 8140 8141 if (mddev->flags & MD_UPDATE_SB_FLAGS) { 8142 if (mddev_is_clustered(mddev)) 8143 md_cluster_ops->metadata_update_start(mddev); 8144 md_update_sb(mddev, 0); 8145 if (mddev_is_clustered(mddev)) 8146 md_cluster_ops->metadata_update_finish(mddev); 8147 } 8148 8149 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 8150 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 8151 /* resync/recovery still happening */ 8152 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8153 goto unlock; 8154 } 8155 if (mddev->sync_thread) { 8156 md_reap_sync_thread(mddev); 8157 goto unlock; 8158 } 8159 /* Set RUNNING before clearing NEEDED to avoid 8160 * any transients in the value of "sync_action". 8161 */ 8162 mddev->curr_resync_completed = 0; 8163 spin_lock(&mddev->lock); 8164 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8165 spin_unlock(&mddev->lock); 8166 /* Clear some bits that don't mean anything, but 8167 * might be left set 8168 */ 8169 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 8170 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 8171 8172 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 8173 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 8174 goto not_running; 8175 /* no recovery is running. 8176 * remove any failed drives, then 8177 * add spares if possible. 8178 * Spares are also removed and re-added, to allow 8179 * the personality to fail the re-add. 8180 */ 8181 8182 if (mddev->reshape_position != MaxSector) { 8183 if (mddev->pers->check_reshape == NULL || 8184 mddev->pers->check_reshape(mddev) != 0) 8185 /* Cannot proceed */ 8186 goto not_running; 8187 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8188 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8189 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 8190 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8191 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8192 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8193 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8194 } else if (mddev->recovery_cp < MaxSector) { 8195 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8196 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8197 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 8198 /* nothing to be done ... 
*/ 8199 goto not_running; 8200 8201 if (mddev->pers->sync_request) { 8202 if (spares) { 8203 /* We are adding a device or devices to an array 8204 * which has the bitmap stored on all devices. 8205 * So make sure all bitmap pages get written 8206 */ 8207 bitmap_write_all(mddev->bitmap); 8208 } 8209 INIT_WORK(&mddev->del_work, md_start_sync); 8210 queue_work(md_misc_wq, &mddev->del_work); 8211 goto unlock; 8212 } 8213 not_running: 8214 if (!mddev->sync_thread) { 8215 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8216 wake_up(&resync_wait); 8217 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 8218 &mddev->recovery)) 8219 if (mddev->sysfs_action) 8220 sysfs_notify_dirent_safe(mddev->sysfs_action); 8221 } 8222 unlock: 8223 wake_up(&mddev->sb_wait); 8224 mddev_unlock(mddev); 8225 } 8226 } 8227 EXPORT_SYMBOL(md_check_recovery); 8228 8229 void md_reap_sync_thread(struct mddev *mddev) 8230 { 8231 struct md_rdev *rdev; 8232 8233 /* resync has finished, collect result */ 8234 md_unregister_thread(&mddev->sync_thread); 8235 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8236 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8237 /* success...*/ 8238 /* activate any spares */ 8239 if (mddev->pers->spare_active(mddev)) { 8240 sysfs_notify(&mddev->kobj, NULL, 8241 "degraded"); 8242 set_bit(MD_CHANGE_DEVS, &mddev->flags); 8243 } 8244 } 8245 if (mddev_is_clustered(mddev)) 8246 md_cluster_ops->metadata_update_start(mddev); 8247 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8248 mddev->pers->finish_reshape) 8249 mddev->pers->finish_reshape(mddev); 8250 8251 /* If array is no-longer degraded, then any saved_raid_disk 8252 * information must be scrapped. 8253 */ 8254 if (!mddev->degraded) 8255 rdev_for_each(rdev, mddev) 8256 rdev->saved_raid_disk = -1; 8257 8258 md_update_sb(mddev, 1); 8259 if (mddev_is_clustered(mddev)) 8260 md_cluster_ops->metadata_update_finish(mddev); 8261 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8262 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8263 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8264 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8265 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8266 wake_up(&resync_wait); 8267 /* flag recovery needed just to double check */ 8268 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8269 sysfs_notify_dirent_safe(mddev->sysfs_action); 8270 md_new_event(mddev); 8271 if (mddev->event_work.func) 8272 queue_work(md_misc_wq, &mddev->event_work); 8273 } 8274 EXPORT_SYMBOL(md_reap_sync_thread); 8275 8276 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 8277 { 8278 sysfs_notify_dirent_safe(rdev->sysfs_state); 8279 wait_event_timeout(rdev->blocked_wait, 8280 !test_bit(Blocked, &rdev->flags) && 8281 !test_bit(BlockedBadBlocks, &rdev->flags), 8282 msecs_to_jiffies(5000)); 8283 rdev_dec_pending(rdev, mddev); 8284 } 8285 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 8286 8287 void md_finish_reshape(struct mddev *mddev) 8288 { 8289 /* called be personality module when reshape completes. */ 8290 struct md_rdev *rdev; 8291 8292 rdev_for_each(rdev, mddev) { 8293 if (rdev->data_offset > rdev->new_data_offset) 8294 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 8295 else 8296 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 8297 rdev->data_offset = rdev->new_data_offset; 8298 } 8299 } 8300 EXPORT_SYMBOL(md_finish_reshape); 8301 8302 /* Bad block management. 
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64 bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 * A 'shift' can be set so that larger blocks are tracked and
 * consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so md_is_badblock
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
		   sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not. */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
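
/*
 * Illustration only, not compiled: the 64-bit packing described above
 * corresponds to expressions along these lines (the canonical
 * BB_MAKE/BB_OFFSET/BB_LEN/BB_ACK helpers live in md.h), assuming
 * bit 63 = acknowledged, bits 9-62 = start sector, bits 0-8 = length - 1:
 *
 *	entry = (start << 9) | ((u64)(len - 1) & 0x1ff) | ((u64)!!ack << 63);
 *	start = (entry & ~(1ULL << 63)) >> 9;
 *	len   = (entry & 0x1ff) + 1;
 *	ack   = entry >> 63;
 */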
/*
 * Add a range of bad blocks to the table.
 * This might extend the table, or might contract it
 * if two adjacent ranges can be merged.
 * We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 */
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 1;
	unsigned long flags;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 0;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;
		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);
		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range
		 */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);
		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);
		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi'
		 */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;
			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}
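
/*
 * Worked example of the merge logic above (a sketch, assuming bb->shift == 0
 * and a table holding a single entry for sectors 100-107, i.e. start 100,
 * length 8):
 *
 *	md_set_badblocks(bb, 104, 8, ack);
 *
 * finds that the new range overlaps the existing entry and extends it to
 * (start 100, length 12), and-ing the old and new acknowledged flags.
 * An entry may only grow to BB_MAX_LEN sectors; anything left over is
 * stored as additional entries by the trailing while (sectors) loop.
 */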
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = md_set_badblocks(&rdev->badblocks,
			      s, sectors, 0);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = -ENOSPC;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so we can stop */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}
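
/*
 * Worked example of the split case above (a sketch, assuming bb->shift == 0
 * and a single entry covering sectors 100-163, i.e. start 100, length 64):
 *
 *	md_clear_badblocks(bb, 116, 16);
 *
 * leaves two entries behind: (start 100, length 16) and (start 132,
 * length 32).  The split consumes an extra table slot, which is why this
 * function can fail with -ENOSPC when the table is already full.
 */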
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	return md_clear_badblocks(&rdev->badblocks,
				  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}

#define DO_DEBUG 1
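
/*
 * Sketch of the text format used by both sysfs files, derived from
 * badblocks_show() above and badblocks_store() below: each line is
 * "<start-sector> <length>", e.g. writing
 *
 *	4096 16
 *
 * records a 16-sector bad range starting at sector 4096.  When DO_DEBUG
 * is defined, a leading '-' clears the range instead of setting it.
 */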
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock.
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots.  While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call = md_notify_reboot,
	.next = NULL,
	.priority = INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

void md_reload_sb(struct mddev *mddev)
{
	struct md_rdev *rdev, *tmp;

	rdev_for_each_safe(rdev, tmp, mddev) {
		rdev->sb_loaded = 0;
		ClearPageUptodate(rdev->sb_page);
	}
	mddev->raid_disks = 0;
	analyze_sbs(mddev);
	rdev_for_each_safe(rdev, tmp, mddev) {
		struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
		/* since we don't write to faulty devices, we figure out if the
		 * disk is faulty by comparing events
		 */
		if (mddev->events > sb->events)
			set_bit(Faulty, &rdev->flags);
	}

}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}

static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
		i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;
	int delay = 1;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the modules while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
		delay += delay;
	}
	remove_proc_entry("mdstat", NULL);

	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
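
/*
 * Usage note (a sketch; the module is normally built as md_mod): the
 * parameters declared above can be given at load time or on the kernel
 * command line, e.g.
 *
 *	modprobe md_mod start_ro=1
 *	md_mod.start_dirty_degraded=1
 *
 * start_ro makes newly assembled arrays come up auto-read-only until the
 * first write, start_dirty_degraded permits starting a dirty degraded
 * array, and new_array accepts an array name so that "md_NAME" devices
 * can be pre-created through the module parameter interface.
 */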