/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

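/*
 * Example (illustrative):
 *	# echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *	# echo 500000 > /proc/sys/dev/raid/speed_limit_max
 * or, for a single array:
 *	# echo 50000 > /sys/block/md0/md/sync_speed_min
 * A per-array value of 0 falls back to the system-wide sysctl values,
 * as speed_min()/speed_max() below implement.
 */
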
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
	if (!b)
		return NULL;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    struct mddev *mddev)
{
	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)

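/*
 * Usage sketch (illustrative only):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("%s\n", mdname(mddev));
 *
 * The lock is dropped and a reference held while the body runs, so
 * the body may sleep; code that breaks out early inherits that
 * reference and must mddev_put() it, as noted above.
 */
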
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;
	int cpu;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
		return;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

static int md_mergeable_bvec(struct request_queue *q,
			     struct bvec_merge_data *bvm,
			     struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	int ret;
	rcu_read_lock();
	if (mddev->suspended) {
		/* Must always allow one vec */
		if (bvm->bi_size == 0)
			ret = biovec->bv_len;
		else
			ret = 0;
	} else {
		struct md_personality *pers = mddev->pers;
		if (pers && pers->mergeable_bvec)
			ret = pers->mergeable_bvec(mddev, bvm, biovec);
		else
			ret = biovec->bv_len;
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, the other after
			 * we release the rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

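/*
 * Flow of a REQ_FLUSH bio (descriptive note): md_flush_request()
 * parks the bio in mddev->flush_bio and queues submit_flushes(),
 * which sends an empty WRITE_FLUSH to every active rdev.  The
 * flush_pending count starts at 1, so the final md_end_flush() (or
 * submit_flushes() itself, if no rdev needed a flush) is what queues
 * md_submit_flush_data() below to issue the actual payload.
 */
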
static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_iter.bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		mddev->pers->make_request(mddev, bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->lock);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);

void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct mddev *mddev = cb->data;
	md_wakeup_thread(mddev->thread);
	kfree(cb);
}
EXPORT_SYMBOL(md_unplug);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use.
				 */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

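/*
 * Illustrative example (assuming MD_NEW_SIZE_SECTORS() from md_p.h
 * rounds down to a 64K boundary and then steps back one 64K block,
 * i.e. 128 sectors): a 1000000-sector device gives
 *	1000000 & ~127 = 999936;  999936 - 128 = 999808
 * so the 0.90 superblock lives at sector 999808, in the last fully
 * aligned 64K block of the device.
 */
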
static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	kfree(rdev->badblocks.page);
	rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	int ret;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	submit_bio_wait(rw, bio);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

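/*
 * Worked example (illustrative): md_csum_fold(0x00012ffe)
 *	first fold:  0x2ffe + 0x0001 = 0x2fff
 *	second fold: 0x2fff + 0x0000 = 0x2fff
 * The first fold can produce at most 0x1fffe, so one more fold is
 * always enough to reduce the sum to 16 bits.
 */
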
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type {
	char			*name;
	struct module		*owner;
	int			(*load_super)(struct md_rdev *rdev,
					      struct md_rdev *refdev,
					      int minor_version);
	int			(*validate_super)(struct mddev *mddev,
						  struct md_rdev *rdev);
	void			(*sync_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	unsigned long long	(*rdev_size_change)(struct md_rdev *rdev,
						    sector_t num_sectors);
	int			(*allow_new_offset)(struct md_rdev *rdev,
						    unsigned long long new_offset);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
970 */ 971 rdev->sb_start = calc_dev_sboffset(rdev); 972 973 ret = read_disk_sb(rdev, MD_SB_BYTES); 974 if (ret) return ret; 975 976 ret = -EINVAL; 977 978 bdevname(rdev->bdev, b); 979 sb = page_address(rdev->sb_page); 980 981 if (sb->md_magic != MD_SB_MAGIC) { 982 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 983 b); 984 goto abort; 985 } 986 987 if (sb->major_version != 0 || 988 sb->minor_version < 90 || 989 sb->minor_version > 91) { 990 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 991 sb->major_version, sb->minor_version, 992 b); 993 goto abort; 994 } 995 996 if (sb->raid_disks <= 0) 997 goto abort; 998 999 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 1000 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 1001 b); 1002 goto abort; 1003 } 1004 1005 rdev->preferred_minor = sb->md_minor; 1006 rdev->data_offset = 0; 1007 rdev->new_data_offset = 0; 1008 rdev->sb_size = MD_SB_BYTES; 1009 rdev->badblocks.shift = -1; 1010 1011 if (sb->level == LEVEL_MULTIPATH) 1012 rdev->desc_nr = -1; 1013 else 1014 rdev->desc_nr = sb->this_disk.number; 1015 1016 if (!refdev) { 1017 ret = 1; 1018 } else { 1019 __u64 ev1, ev2; 1020 mdp_super_t *refsb = page_address(refdev->sb_page); 1021 if (!uuid_equal(refsb, sb)) { 1022 printk(KERN_WARNING "md: %s has different UUID to %s\n", 1023 b, bdevname(refdev->bdev,b2)); 1024 goto abort; 1025 } 1026 if (!sb_equal(refsb, sb)) { 1027 printk(KERN_WARNING "md: %s has same UUID" 1028 " but different superblock to %s\n", 1029 b, bdevname(refdev->bdev, b2)); 1030 goto abort; 1031 } 1032 ev1 = md_event(sb); 1033 ev2 = md_event(refsb); 1034 if (ev1 > ev2) 1035 ret = 1; 1036 else 1037 ret = 0; 1038 } 1039 rdev->sectors = rdev->sb_start; 1040 /* Limit to 4TB as metadata cannot record more than that. 1041 * (not needed for Linear and RAID0 as metadata doesn't 1042 * record this size) 1043 */ 1044 if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) 1045 rdev->sectors = (2ULL << 32) - 2; 1046 1047 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1048 /* "this cannot possibly happen" ... 
		 */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

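	/*
	 * Illustrative example: on a 2-disk RAID1 with one spare, the
	 * two In_sync members keep desc_nr equal to their raid_disk
	 * (0 and 1), while the spare gets desc_nr next_spare == 2.
	 */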
	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
1337 */ 1338 if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1339 num_sectors = (2ULL << 32) - 2; 1340 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1341 rdev->sb_page); 1342 md_super_wait(rdev->mddev); 1343 return num_sectors; 1344 } 1345 1346 static int 1347 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1348 { 1349 /* non-zero offset changes not possible with v0.90 */ 1350 return new_offset == 0; 1351 } 1352 1353 /* 1354 * version 1 superblock 1355 */ 1356 1357 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) 1358 { 1359 __le32 disk_csum; 1360 u32 csum; 1361 unsigned long long newcsum; 1362 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1363 __le32 *isuper = (__le32*)sb; 1364 1365 disk_csum = sb->sb_csum; 1366 sb->sb_csum = 0; 1367 newcsum = 0; 1368 for (; size >= 4; size -= 4) 1369 newcsum += le32_to_cpu(*isuper++); 1370 1371 if (size == 2) 1372 newcsum += le16_to_cpu(*(__le16*) isuper); 1373 1374 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1375 sb->sb_csum = disk_csum; 1376 return cpu_to_le32(csum); 1377 } 1378 1379 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 1380 int acknowledged); 1381 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1382 { 1383 struct mdp_superblock_1 *sb; 1384 int ret; 1385 sector_t sb_start; 1386 sector_t sectors; 1387 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1388 int bmask; 1389 1390 /* 1391 * Calculate the position of the superblock in 512byte sectors. 1392 * It is always aligned to a 4K boundary and 1393 * depeding on minor_version, it can be: 1394 * 0: At least 8K, but less than 12K, from end of device 1395 * 1: At start of device 1396 * 2: 4K from start of device. 1397 */ 1398 switch(minor_version) { 1399 case 0: 1400 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; 1401 sb_start -= 8*2; 1402 sb_start &= ~(sector_t)(4*2-1); 1403 break; 1404 case 1: 1405 sb_start = 0; 1406 break; 1407 case 2: 1408 sb_start = 8; 1409 break; 1410 default: 1411 return -EINVAL; 1412 } 1413 rdev->sb_start = sb_start; 1414 1415 /* superblock is rarely larger than 1K, but it can be larger, 1416 * and it is safe to read 4k, so we do that 1417 */ 1418 ret = read_disk_sb(rdev, 4096); 1419 if (ret) return ret; 1420 1421 sb = page_address(rdev->sb_page); 1422 1423 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1424 sb->major_version != cpu_to_le32(1) || 1425 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1426 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1427 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1428 return -EINVAL; 1429 1430 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1431 printk("md: invalid superblock checksum on %s\n", 1432 bdevname(rdev->bdev,b)); 1433 return -EINVAL; 1434 } 1435 if (le64_to_cpu(sb->data_size) < 10) { 1436 printk("md: data_size too small on %s\n", 1437 bdevname(rdev->bdev,b)); 1438 return -EINVAL; 1439 } 1440 if (sb->pad0 || 1441 sb->pad3[0] || 1442 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) 1443 /* Some padding is non-zero, might be a new feature */ 1444 return -EINVAL; 1445 1446 rdev->preferred_minor = 0xffff; 1447 rdev->data_offset = le64_to_cpu(sb->data_offset); 1448 rdev->new_data_offset = rdev->data_offset; 1449 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1450 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1451 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1452 atomic_set(&rdev->corrected_errors, 
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
			       " superblock to %s\n",
			       bdevname(rdev->bdev,b),
			       bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}

static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
	}
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
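		/*
		 * Copy the table under a seqlock read section: if the
		 * bad-block list changes while we copy (the write side
		 * takes bb->lock), read_seqretry() fires and the copy
		 * restarts, so the on-disk page is always a consistent
		 * snapshot.  Unused slots keep the 0xff fill, which the
		 * loader in super_1_load treats as a terminator.
		 */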
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = p[i];
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				bbp[i] = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;

}

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
		.allow_new_offset   = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
		.allow_new_offset   = super_1_allow_new_offset,
	},
};

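/*
 * Illustrative note: mddev->major_version indexes this table, so an
 * array using 1.x metadata (major_version == 1) is dispatched to
 * super_1_sync() by sync_super() below, unless mddev->sync_super has
 * been set, in which case that callback takes precedence.
 */
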
1952 */ 1953 int md_integrity_register(struct mddev *mddev) 1954 { 1955 struct md_rdev *rdev, *reference = NULL; 1956 1957 if (list_empty(&mddev->disks)) 1958 return 0; /* nothing to do */ 1959 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 1960 return 0; /* shouldn't register, or already is */ 1961 rdev_for_each(rdev, mddev) { 1962 /* skip spares and non-functional disks */ 1963 if (test_bit(Faulty, &rdev->flags)) 1964 continue; 1965 if (rdev->raid_disk < 0) 1966 continue; 1967 if (!reference) { 1968 /* Use the first rdev as the reference */ 1969 reference = rdev; 1970 continue; 1971 } 1972 /* does this rdev's profile match the reference profile? */ 1973 if (blk_integrity_compare(reference->bdev->bd_disk, 1974 rdev->bdev->bd_disk) < 0) 1975 return -EINVAL; 1976 } 1977 if (!reference || !bdev_get_integrity(reference->bdev)) 1978 return 0; 1979 /* 1980 * All component devices are integrity capable and have matching 1981 * profiles, register the common profile for the md device. 1982 */ 1983 if (blk_integrity_register(mddev->gendisk, 1984 bdev_get_integrity(reference->bdev)) != 0) { 1985 printk(KERN_ERR "md: failed to register integrity for %s\n", 1986 mdname(mddev)); 1987 return -EINVAL; 1988 } 1989 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); 1990 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) { 1991 printk(KERN_ERR "md: failed to create integrity pool for %s\n", 1992 mdname(mddev)); 1993 return -EINVAL; 1994 } 1995 return 0; 1996 } 1997 EXPORT_SYMBOL(md_integrity_register); 1998 1999 /* Disable data integrity if non-capable/non-matching disk is being added */ 2000 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2001 { 2002 struct blk_integrity *bi_rdev; 2003 struct blk_integrity *bi_mddev; 2004 2005 if (!mddev->gendisk) 2006 return; 2007 2008 bi_rdev = bdev_get_integrity(rdev->bdev); 2009 bi_mddev = blk_get_integrity(mddev->gendisk); 2010 2011 if (!bi_mddev) /* nothing to do */ 2012 return; 2013 if (rdev->raid_disk < 0) /* skip spares */ 2014 return; 2015 if (bi_rdev && blk_integrity_compare(mddev->gendisk, 2016 rdev->bdev->bd_disk) >= 0) 2017 return; 2018 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); 2019 blk_integrity_unregister(mddev->gendisk); 2020 } 2021 EXPORT_SYMBOL(md_integrity_add_rdev); 2022 2023 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2024 { 2025 char b[BDEVNAME_SIZE]; 2026 struct kobject *ko; 2027 int err; 2028 2029 /* prevent duplicates */ 2030 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2031 return -EEXIST; 2032 2033 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2034 if (rdev->sectors && (mddev->dev_sectors == 0 || 2035 rdev->sectors < mddev->dev_sectors)) { 2036 if (mddev->pers) { 2037 /* Cannot change size, so fail 2038 * If mddev->level <= 0, then we don't care 2039 * about aligning sizes (e.g. linear) 2040 */ 2041 if (mddev->level > 0) 2042 return -ENOSPC; 2043 } else 2044 mddev->dev_sectors = rdev->sectors; 2045 } 2046 2047 /* Verify rdev->desc_nr is unique. 
2048 * If it is -1, assign a free number, else 2049 * check number is not in use 2050 */ 2051 rcu_read_lock(); 2052 if (rdev->desc_nr < 0) { 2053 int choice = 0; 2054 if (mddev->pers) 2055 choice = mddev->raid_disks; 2056 while (md_find_rdev_nr_rcu(mddev, choice)) 2057 choice++; 2058 rdev->desc_nr = choice; 2059 } else { 2060 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2061 rcu_read_unlock(); 2062 return -EBUSY; 2063 } 2064 } 2065 rcu_read_unlock(); 2066 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2067 printk(KERN_WARNING "md: %s: array is limited to %d devices\n", 2068 mdname(mddev), mddev->max_disks); 2069 return -EBUSY; 2070 } 2071 bdevname(rdev->bdev,b); 2072 strreplace(b, '/', '!'); 2073 2074 rdev->mddev = mddev; 2075 printk(KERN_INFO "md: bind<%s>\n", b); 2076 2077 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2078 goto fail; 2079 2080 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2081 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2082 /* failure here is OK */; 2083 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2084 2085 list_add_rcu(&rdev->same_set, &mddev->disks); 2086 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2087 2088 /* May as well allow recovery to be retried once */ 2089 mddev->recovery_disabled++; 2090 2091 return 0; 2092 2093 fail: 2094 printk(KERN_WARNING "md: failed to register dev-%s for %s\n", 2095 b, mdname(mddev)); 2096 return err; 2097 } 2098 2099 static void md_delayed_delete(struct work_struct *ws) 2100 { 2101 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2102 kobject_del(&rdev->kobj); 2103 kobject_put(&rdev->kobj); 2104 } 2105 2106 static void unbind_rdev_from_array(struct md_rdev *rdev) 2107 { 2108 char b[BDEVNAME_SIZE]; 2109 2110 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2111 list_del_rcu(&rdev->same_set); 2112 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2113 rdev->mddev = NULL; 2114 sysfs_remove_link(&rdev->kobj, "block"); 2115 sysfs_put(rdev->sysfs_state); 2116 rdev->sysfs_state = NULL; 2117 rdev->badblocks.count = 0; 2118 /* We need to delay this, otherwise we can deadlock when 2119 * writing to 'remove' to "dev/state". We also need 2120 * to delay it due to rcu usage. 2121 */ 2122 synchronize_rcu(); 2123 INIT_WORK(&rdev->del_work, md_delayed_delete); 2124 kobject_get(&rdev->kobj); 2125 queue_work(md_misc_wq, &rdev->del_work); 2126 } 2127 2128 /* 2129 * prevent the device from being mounted, repartitioned or 2130 * otherwise reused by a RAID array (or any other kernel 2131 * subsystem), by bd_claiming the device. 2132 */ 2133 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2134 { 2135 int err = 0; 2136 struct block_device *bdev; 2137 char b[BDEVNAME_SIZE]; 2138 2139 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2140 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2141 if (IS_ERR(bdev)) { 2142 printk(KERN_ERR "md: could not open %s.\n", 2143 __bdevname(dev, b)); 2144 return PTR_ERR(bdev); 2145 } 2146 rdev->bdev = bdev; 2147 return err; 2148 } 2149 2150 static void unlock_rdev(struct md_rdev *rdev) 2151 { 2152 struct block_device *bdev = rdev->bdev; 2153 rdev->bdev = NULL; 2154 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2155 } 2156 2157 void md_autodetect_dev(dev_t dev); 2158 2159 static void export_rdev(struct md_rdev *rdev) 2160 { 2161 char b[BDEVNAME_SIZE]; 2162 2163 printk(KERN_INFO "md: export_rdev(%s)\n", 2164 bdevname(rdev->bdev,b)); 2165 md_rdev_clear(rdev); 2166 #ifndef MODULE 2167 if (test_bit(AutoDetected, &rdev->flags)) 2168 md_autodetect_dev(rdev->bdev->bd_dev); 2169 #endif 2170 unlock_rdev(rdev); 2171 kobject_put(&rdev->kobj); 2172 } 2173 2174 void md_kick_rdev_from_array(struct md_rdev *rdev) 2175 { 2176 unbind_rdev_from_array(rdev); 2177 export_rdev(rdev); 2178 } 2179 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2180 2181 static void export_array(struct mddev *mddev) 2182 { 2183 struct md_rdev *rdev; 2184 2185 while (!list_empty(&mddev->disks)) { 2186 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2187 same_set); 2188 md_kick_rdev_from_array(rdev); 2189 } 2190 mddev->raid_disks = 0; 2191 mddev->major_version = 0; 2192 } 2193 2194 static void sync_sbs(struct mddev *mddev, int nospares) 2195 { 2196 /* Update each superblock (in-memory image), but 2197 * if we are allowed to, skip spares which already 2198 * have the right event counter, or have one earlier 2199 * (which would mean they aren't being marked as dirty 2200 * with the rest of the array) 2201 */ 2202 struct md_rdev *rdev; 2203 rdev_for_each(rdev, mddev) { 2204 if (rdev->sb_events == mddev->events || 2205 (nospares && 2206 rdev->raid_disk < 0 && 2207 rdev->sb_events+1 == mddev->events)) { 2208 /* Don't update this superblock */ 2209 rdev->sb_loaded = 2; 2210 } else { 2211 sync_super(mddev, rdev); 2212 rdev->sb_loaded = 1; 2213 } 2214 } 2215 } 2216 2217 void md_update_sb(struct mddev *mddev, int force_change) 2218 { 2219 struct md_rdev *rdev; 2220 int sync_req; 2221 int nospares = 0; 2222 int any_badblocks_changed = 0; 2223 2224 if (mddev->ro) { 2225 if (force_change) 2226 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2227 return; 2228 } 2229 repeat: 2230 /* First make sure individual recovery_offsets are correct */ 2231 rdev_for_each(rdev, mddev) { 2232 if (rdev->raid_disk >= 0 && 2233 mddev->delta_disks >= 0 && 2234 !test_bit(In_sync, &rdev->flags) && 2235 mddev->curr_resync_completed > rdev->recovery_offset) 2236 rdev->recovery_offset = mddev->curr_resync_completed; 2237 2238 } 2239 if (!mddev->persistent) { 2240 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2241 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2242 if (!mddev->external) { 2243 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2244 rdev_for_each(rdev, mddev) { 2245 if (rdev->badblocks.changed) { 2246 rdev->badblocks.changed = 0; 2247 md_ack_all_badblocks(&rdev->badblocks); 2248 md_error(mddev, rdev); 2249 } 2250 clear_bit(Blocked, &rdev->flags); 2251 clear_bit(BlockedBadBlocks, &rdev->flags); 2252 wake_up(&rdev->blocked_wait); 2253 } 2254 } 2255 wake_up(&mddev->sb_wait); 2256 return; 2257 } 2258 2259 spin_lock(&mddev->lock); 2260 2261 mddev->utime = get_seconds(); 2262 2263 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2264 force_change = 1; 2265 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2266 /* just a clean<-> dirty transition, possibly leave spares 
alone, 2267 * though if events isn't the right even/odd, we will have to do 2268 * spares after all 2269 */ 2270 nospares = 1; 2271 if (force_change) 2272 nospares = 0; 2273 if (mddev->degraded) 2274 /* If the array is degraded, then skipping spares is both 2275 * dangerous and fairly pointless. 2276 * Dangerous because a device that was removed from the array 2277 * might have a event_count that still looks up-to-date, 2278 * so it can be re-added without a resync. 2279 * Pointless because if there are any spares to skip, 2280 * then a recovery will happen and soon that array won't 2281 * be degraded any more and the spare can go back to sleep then. 2282 */ 2283 nospares = 0; 2284 2285 sync_req = mddev->in_sync; 2286 2287 /* If this is just a dirty<->clean transition, and the array is clean 2288 * and 'events' is odd, we can roll back to the previous clean state */ 2289 if (nospares 2290 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2291 && mddev->can_decrease_events 2292 && mddev->events != 1) { 2293 mddev->events--; 2294 mddev->can_decrease_events = 0; 2295 } else { 2296 /* otherwise we have to go forward and ... */ 2297 mddev->events ++; 2298 mddev->can_decrease_events = nospares; 2299 } 2300 2301 /* 2302 * This 64-bit counter should never wrap. 2303 * Either we are in around ~1 trillion A.C., assuming 2304 * 1 reboot per second, or we have a bug... 2305 */ 2306 WARN_ON(mddev->events == 0); 2307 2308 rdev_for_each(rdev, mddev) { 2309 if (rdev->badblocks.changed) 2310 any_badblocks_changed++; 2311 if (test_bit(Faulty, &rdev->flags)) 2312 set_bit(FaultRecorded, &rdev->flags); 2313 } 2314 2315 sync_sbs(mddev, nospares); 2316 spin_unlock(&mddev->lock); 2317 2318 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2319 mdname(mddev), mddev->in_sync); 2320 2321 bitmap_update_sb(mddev->bitmap); 2322 rdev_for_each(rdev, mddev) { 2323 char b[BDEVNAME_SIZE]; 2324 2325 if (rdev->sb_loaded != 1) 2326 continue; /* no noise on spare devices */ 2327 2328 if (!test_bit(Faulty, &rdev->flags)) { 2329 md_super_write(mddev,rdev, 2330 rdev->sb_start, rdev->sb_size, 2331 rdev->sb_page); 2332 pr_debug("md: (write) %s's sb offset: %llu\n", 2333 bdevname(rdev->bdev, b), 2334 (unsigned long long)rdev->sb_start); 2335 rdev->sb_events = mddev->events; 2336 if (rdev->badblocks.size) { 2337 md_super_write(mddev, rdev, 2338 rdev->badblocks.sector, 2339 rdev->badblocks.size << 9, 2340 rdev->bb_page); 2341 rdev->badblocks.size = 0; 2342 } 2343 2344 } else 2345 pr_debug("md: %s (skipping faulty)\n", 2346 bdevname(rdev->bdev, b)); 2347 2348 if (mddev->level == LEVEL_MULTIPATH) 2349 /* only need to write one superblock... 
*/ 2350 break; 2351 } 2352 md_super_wait(mddev); 2353 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2354 2355 spin_lock(&mddev->lock); 2356 if (mddev->in_sync != sync_req || 2357 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2358 /* have to write it out again */ 2359 spin_unlock(&mddev->lock); 2360 goto repeat; 2361 } 2362 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2363 spin_unlock(&mddev->lock); 2364 wake_up(&mddev->sb_wait); 2365 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2366 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2367 2368 rdev_for_each(rdev, mddev) { 2369 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2370 clear_bit(Blocked, &rdev->flags); 2371 2372 if (any_badblocks_changed) 2373 md_ack_all_badblocks(&rdev->badblocks); 2374 clear_bit(BlockedBadBlocks, &rdev->flags); 2375 wake_up(&rdev->blocked_wait); 2376 } 2377 } 2378 EXPORT_SYMBOL(md_update_sb); 2379 2380 static int add_bound_rdev(struct md_rdev *rdev) 2381 { 2382 struct mddev *mddev = rdev->mddev; 2383 int err = 0; 2384 2385 if (!mddev->pers->hot_remove_disk) { 2386 /* If there is hot_add_disk but no hot_remove_disk 2387 * then added disks for geometry changes, 2388 * and should be added immediately. 2389 */ 2390 super_types[mddev->major_version]. 2391 validate_super(mddev, rdev); 2392 err = mddev->pers->hot_add_disk(mddev, rdev); 2393 if (err) { 2394 unbind_rdev_from_array(rdev); 2395 export_rdev(rdev); 2396 return err; 2397 } 2398 } 2399 sysfs_notify_dirent_safe(rdev->sysfs_state); 2400 2401 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2402 if (mddev->degraded) 2403 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2404 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2405 md_new_event(mddev); 2406 md_wakeup_thread(mddev->thread); 2407 return 0; 2408 } 2409 2410 /* words written to sysfs files may, or may not, be \n terminated. 2411 * We want to accept with case. For this we use cmd_match. 2412 */ 2413 static int cmd_match(const char *cmd, const char *str) 2414 { 2415 /* See if cmd, written into a sysfs file, matches 2416 * str. 
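 * (For example, a sysfs write of "faulty\n" matches the command
 * string "faulty".)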
They must either be the same, or cmd can 2417 * have a trailing newline 2418 */ 2419 while (*cmd && *str && *cmd == *str) { 2420 cmd++; 2421 str++; 2422 } 2423 if (*cmd == '\n') 2424 cmd++; 2425 if (*str || *cmd) 2426 return 0; 2427 return 1; 2428 } 2429 2430 struct rdev_sysfs_entry { 2431 struct attribute attr; 2432 ssize_t (*show)(struct md_rdev *, char *); 2433 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2434 }; 2435 2436 static ssize_t 2437 state_show(struct md_rdev *rdev, char *page) 2438 { 2439 char *sep = ""; 2440 size_t len = 0; 2441 unsigned long flags = ACCESS_ONCE(rdev->flags); 2442 2443 if (test_bit(Faulty, &flags) || 2444 rdev->badblocks.unacked_exist) { 2445 len+= sprintf(page+len, "%sfaulty",sep); 2446 sep = ","; 2447 } 2448 if (test_bit(In_sync, &flags)) { 2449 len += sprintf(page+len, "%sin_sync",sep); 2450 sep = ","; 2451 } 2452 if (test_bit(WriteMostly, &flags)) { 2453 len += sprintf(page+len, "%swrite_mostly",sep); 2454 sep = ","; 2455 } 2456 if (test_bit(Blocked, &flags) || 2457 (rdev->badblocks.unacked_exist 2458 && !test_bit(Faulty, &flags))) { 2459 len += sprintf(page+len, "%sblocked", sep); 2460 sep = ","; 2461 } 2462 if (!test_bit(Faulty, &flags) && 2463 !test_bit(In_sync, &flags)) { 2464 len += sprintf(page+len, "%sspare", sep); 2465 sep = ","; 2466 } 2467 if (test_bit(WriteErrorSeen, &flags)) { 2468 len += sprintf(page+len, "%swrite_error", sep); 2469 sep = ","; 2470 } 2471 if (test_bit(WantReplacement, &flags)) { 2472 len += sprintf(page+len, "%swant_replacement", sep); 2473 sep = ","; 2474 } 2475 if (test_bit(Replacement, &flags)) { 2476 len += sprintf(page+len, "%sreplacement", sep); 2477 sep = ","; 2478 } 2479 2480 return len+sprintf(page+len, "\n"); 2481 } 2482 2483 static ssize_t 2484 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2485 { 2486 /* can write 2487 * faulty - simulates an error 2488 * remove - disconnects the device 2489 * writemostly - sets write_mostly 2490 * -writemostly - clears write_mostly 2491 * blocked - sets the Blocked flags 2492 * -blocked - clears the Blocked and possibly simulates an error 2493 * insync - sets Insync providing device isn't active 2494 * -insync - clear Insync for a device with a slot assigned, 2495 * so that it gets rebuilt based on bitmap 2496 * write_error - sets WriteErrorSeen 2497 * -write_error - clears WriteErrorSeen 2498 */ 2499 int err = -EINVAL; 2500 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2501 md_error(rdev->mddev, rdev); 2502 if (test_bit(Faulty, &rdev->flags)) 2503 err = 0; 2504 else 2505 err = -EBUSY; 2506 } else if (cmd_match(buf, "remove")) { 2507 if (rdev->raid_disk >= 0) 2508 err = -EBUSY; 2509 else { 2510 struct mddev *mddev = rdev->mddev; 2511 if (mddev_is_clustered(mddev)) 2512 md_cluster_ops->remove_disk(mddev, rdev); 2513 md_kick_rdev_from_array(rdev); 2514 if (mddev_is_clustered(mddev)) 2515 md_cluster_ops->metadata_update_start(mddev); 2516 if (mddev->pers) 2517 md_update_sb(mddev, 1); 2518 md_new_event(mddev); 2519 if (mddev_is_clustered(mddev)) 2520 md_cluster_ops->metadata_update_finish(mddev); 2521 err = 0; 2522 } 2523 } else if (cmd_match(buf, "writemostly")) { 2524 set_bit(WriteMostly, &rdev->flags); 2525 err = 0; 2526 } else if (cmd_match(buf, "-writemostly")) { 2527 clear_bit(WriteMostly, &rdev->flags); 2528 err = 0; 2529 } else if (cmd_match(buf, "blocked")) { 2530 set_bit(Blocked, &rdev->flags); 2531 err = 0; 2532 } else if (cmd_match(buf, "-blocked")) { 2533 if (!test_bit(Faulty, &rdev->flags) && 2534 rdev->badblocks.unacked_exist) { 2535 
/* metadata handler doesn't understand badblocks, 2536 * so we need to fail the device 2537 */ 2538 md_error(rdev->mddev, rdev); 2539 } 2540 clear_bit(Blocked, &rdev->flags); 2541 clear_bit(BlockedBadBlocks, &rdev->flags); 2542 wake_up(&rdev->blocked_wait); 2543 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2544 md_wakeup_thread(rdev->mddev->thread); 2545 2546 err = 0; 2547 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2548 set_bit(In_sync, &rdev->flags); 2549 err = 0; 2550 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) { 2551 if (rdev->mddev->pers == NULL) { 2552 clear_bit(In_sync, &rdev->flags); 2553 rdev->saved_raid_disk = rdev->raid_disk; 2554 rdev->raid_disk = -1; 2555 err = 0; 2556 } 2557 } else if (cmd_match(buf, "write_error")) { 2558 set_bit(WriteErrorSeen, &rdev->flags); 2559 err = 0; 2560 } else if (cmd_match(buf, "-write_error")) { 2561 clear_bit(WriteErrorSeen, &rdev->flags); 2562 err = 0; 2563 } else if (cmd_match(buf, "want_replacement")) { 2564 /* Any non-spare device that is not a replacement can 2565 * become want_replacement at any time, but we then need to 2566 * check if recovery is needed. 2567 */ 2568 if (rdev->raid_disk >= 0 && 2569 !test_bit(Replacement, &rdev->flags)) 2570 set_bit(WantReplacement, &rdev->flags); 2571 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2572 md_wakeup_thread(rdev->mddev->thread); 2573 err = 0; 2574 } else if (cmd_match(buf, "-want_replacement")) { 2575 /* Clearing 'want_replacement' is always allowed. 2576 * Once replacements starts it is too late though. 2577 */ 2578 err = 0; 2579 clear_bit(WantReplacement, &rdev->flags); 2580 } else if (cmd_match(buf, "replacement")) { 2581 /* Can only set a device as a replacement when array has not 2582 * yet been started. Once running, replacement is automatic 2583 * from spares, or by assigning 'slot'. 2584 */ 2585 if (rdev->mddev->pers) 2586 err = -EBUSY; 2587 else { 2588 set_bit(Replacement, &rdev->flags); 2589 err = 0; 2590 } 2591 } else if (cmd_match(buf, "-replacement")) { 2592 /* Similarly, can only clear Replacement before start */ 2593 if (rdev->mddev->pers) 2594 err = -EBUSY; 2595 else { 2596 clear_bit(Replacement, &rdev->flags); 2597 err = 0; 2598 } 2599 } else if (cmd_match(buf, "re-add")) { 2600 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { 2601 /* clear_bit is performed _after_ all the devices 2602 * have their local Faulty bit cleared. If any writes 2603 * happen in the meantime in the local node, they 2604 * will land in the local bitmap, which will be synced 2605 * by this node eventually 2606 */ 2607 if (!mddev_is_clustered(rdev->mddev) || 2608 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 2609 clear_bit(Faulty, &rdev->flags); 2610 err = add_bound_rdev(rdev); 2611 } 2612 } else 2613 err = -EBUSY; 2614 } 2615 if (!err) 2616 sysfs_notify_dirent_safe(rdev->sysfs_state); 2617 return err ? 
err : len; 2618 } 2619 static struct rdev_sysfs_entry rdev_state = 2620 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 2621 2622 static ssize_t 2623 errors_show(struct md_rdev *rdev, char *page) 2624 { 2625 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2626 } 2627 2628 static ssize_t 2629 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 2630 { 2631 unsigned int n; 2632 int rv; 2633 2634 rv = kstrtouint(buf, 10, &n); 2635 if (rv < 0) 2636 return rv; 2637 atomic_set(&rdev->corrected_errors, n); 2638 return len; 2639 } 2640 static struct rdev_sysfs_entry rdev_errors = 2641 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2642 2643 static ssize_t 2644 slot_show(struct md_rdev *rdev, char *page) 2645 { 2646 if (rdev->raid_disk < 0) 2647 return sprintf(page, "none\n"); 2648 else 2649 return sprintf(page, "%d\n", rdev->raid_disk); 2650 } 2651 2652 static ssize_t 2653 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 2654 { 2655 int slot; 2656 int err; 2657 2658 if (strncmp(buf, "none", 4)==0) 2659 slot = -1; 2660 else { 2661 err = kstrtouint(buf, 10, (unsigned int *)&slot); 2662 if (err < 0) 2663 return err; 2664 } 2665 if (rdev->mddev->pers && slot == -1) { 2666 /* Setting 'slot' on an active array requires also 2667 * updating the 'rd%d' link, and communicating 2668 * with the personality with ->hot_*_disk. 2669 * For now we only support removing 2670 * failed/spare devices. This normally happens automatically, 2671 * but not when the metadata is externally managed. 2672 */ 2673 if (rdev->raid_disk == -1) 2674 return -EEXIST; 2675 /* personality does all needed checks */ 2676 if (rdev->mddev->pers->hot_remove_disk == NULL) 2677 return -EINVAL; 2678 clear_bit(Blocked, &rdev->flags); 2679 remove_and_add_spares(rdev->mddev, rdev); 2680 if (rdev->raid_disk >= 0) 2681 return -EBUSY; 2682 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2683 md_wakeup_thread(rdev->mddev->thread); 2684 } else if (rdev->mddev->pers) { 2685 /* Activating a spare .. or possibly reactivating 2686 * if we ever get bitmaps working here. 2687 */ 2688 2689 if (rdev->raid_disk != -1) 2690 return -EBUSY; 2691 2692 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 2693 return -EBUSY; 2694 2695 if (rdev->mddev->pers->hot_add_disk == NULL) 2696 return -EINVAL; 2697 2698 if (slot >= rdev->mddev->raid_disks && 2699 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2700 return -ENOSPC; 2701 2702 rdev->raid_disk = slot; 2703 if (test_bit(In_sync, &rdev->flags)) 2704 rdev->saved_raid_disk = slot; 2705 else 2706 rdev->saved_raid_disk = -1; 2707 clear_bit(In_sync, &rdev->flags); 2708 clear_bit(Bitmap_sync, &rdev->flags); 2709 err = rdev->mddev->pers-> 2710 hot_add_disk(rdev->mddev, rdev); 2711 if (err) { 2712 rdev->raid_disk = -1; 2713 return err; 2714 } else 2715 sysfs_notify_dirent_safe(rdev->sysfs_state); 2716 if (sysfs_link_rdev(rdev->mddev, rdev)) 2717 /* failure here is OK */; 2718 /* don't wakeup anyone, leave that to userspace. 
*/ 2719 } else { 2720 if (slot >= rdev->mddev->raid_disks && 2721 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2722 return -ENOSPC; 2723 rdev->raid_disk = slot; 2724 /* assume it is working */ 2725 clear_bit(Faulty, &rdev->flags); 2726 clear_bit(WriteMostly, &rdev->flags); 2727 set_bit(In_sync, &rdev->flags); 2728 sysfs_notify_dirent_safe(rdev->sysfs_state); 2729 } 2730 return len; 2731 } 2732 2733 static struct rdev_sysfs_entry rdev_slot = 2734 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2735 2736 static ssize_t 2737 offset_show(struct md_rdev *rdev, char *page) 2738 { 2739 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2740 } 2741 2742 static ssize_t 2743 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 2744 { 2745 unsigned long long offset; 2746 if (kstrtoull(buf, 10, &offset) < 0) 2747 return -EINVAL; 2748 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2749 return -EBUSY; 2750 if (rdev->sectors && rdev->mddev->external) 2751 /* Must set offset before size, so overlap checks 2752 * can be sane */ 2753 return -EBUSY; 2754 rdev->data_offset = offset; 2755 rdev->new_data_offset = offset; 2756 return len; 2757 } 2758 2759 static struct rdev_sysfs_entry rdev_offset = 2760 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2761 2762 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 2763 { 2764 return sprintf(page, "%llu\n", 2765 (unsigned long long)rdev->new_data_offset); 2766 } 2767 2768 static ssize_t new_offset_store(struct md_rdev *rdev, 2769 const char *buf, size_t len) 2770 { 2771 unsigned long long new_offset; 2772 struct mddev *mddev = rdev->mddev; 2773 2774 if (kstrtoull(buf, 10, &new_offset) < 0) 2775 return -EINVAL; 2776 2777 if (mddev->sync_thread || 2778 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 2779 return -EBUSY; 2780 if (new_offset == rdev->data_offset) 2781 /* reset is always permitted */ 2782 ; 2783 else if (new_offset > rdev->data_offset) { 2784 /* must not push array size beyond rdev_sectors */ 2785 if (new_offset - rdev->data_offset 2786 + mddev->dev_sectors > rdev->sectors) 2787 return -E2BIG; 2788 } 2789 /* Metadata worries about other space details. */ 2790 2791 /* decreasing the offset is inconsistent with a backwards 2792 * reshape. 2793 */ 2794 if (new_offset < rdev->data_offset && 2795 mddev->reshape_backwards) 2796 return -EINVAL; 2797 /* Increasing offset is inconsistent with forwards 2798 * reshape. reshape_direction should be set to 2799 * 'backwards' first. 
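 * (Assumed sysfs usage, for illustration only: write "backwards" to
 * the array's md/reshape_direction attribute before writing the
 * larger value to this rdev's new_offset attribute.)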
2800 */ 2801 if (new_offset > rdev->data_offset && 2802 !mddev->reshape_backwards) 2803 return -EINVAL; 2804 2805 if (mddev->pers && mddev->persistent && 2806 !super_types[mddev->major_version] 2807 .allow_new_offset(rdev, new_offset)) 2808 return -E2BIG; 2809 rdev->new_data_offset = new_offset; 2810 if (new_offset > rdev->data_offset) 2811 mddev->reshape_backwards = 1; 2812 else if (new_offset < rdev->data_offset) 2813 mddev->reshape_backwards = 0; 2814 2815 return len; 2816 } 2817 static struct rdev_sysfs_entry rdev_new_offset = 2818 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 2819 2820 static ssize_t 2821 rdev_size_show(struct md_rdev *rdev, char *page) 2822 { 2823 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2824 } 2825 2826 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2827 { 2828 /* check if two start/length pairs overlap */ 2829 if (s1+l1 <= s2) 2830 return 0; 2831 if (s2+l2 <= s1) 2832 return 0; 2833 return 1; 2834 } 2835 2836 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2837 { 2838 unsigned long long blocks; 2839 sector_t new; 2840 2841 if (kstrtoull(buf, 10, &blocks) < 0) 2842 return -EINVAL; 2843 2844 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2845 return -EINVAL; /* sector conversion overflow */ 2846 2847 new = blocks * 2; 2848 if (new != blocks * 2) 2849 return -EINVAL; /* unsigned long long to sector_t overflow */ 2850 2851 *sectors = new; 2852 return 0; 2853 } 2854 2855 static ssize_t 2856 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 2857 { 2858 struct mddev *my_mddev = rdev->mddev; 2859 sector_t oldsectors = rdev->sectors; 2860 sector_t sectors; 2861 2862 if (strict_blocks_to_sectors(buf, &sectors) < 0) 2863 return -EINVAL; 2864 if (rdev->data_offset != rdev->new_data_offset) 2865 return -EINVAL; /* too confusing */ 2866 if (my_mddev->pers && rdev->raid_disk >= 0) { 2867 if (my_mddev->persistent) { 2868 sectors = super_types[my_mddev->major_version]. 2869 rdev_size_change(rdev, sectors); 2870 if (!sectors) 2871 return -EBUSY; 2872 } else if (!sectors) 2873 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 2874 rdev->data_offset; 2875 if (!my_mddev->pers->resize) 2876 /* Cannot change size for RAID0 or Linear etc */ 2877 return -EINVAL; 2878 } 2879 if (sectors < my_mddev->dev_sectors) 2880 return -EINVAL; /* component must fit device */ 2881 2882 rdev->sectors = sectors; 2883 if (sectors > oldsectors && my_mddev->external) { 2884 /* Need to check that all other rdevs with the same 2885 * ->bdev do not overlap. 'rcu' is sufficient to walk 2886 * the rdev lists safely. 2887 * This check does not provide a hard guarantee, it 2888 * just helps avoid dangerous mistakes. 2889 */ 2890 struct mddev *mddev; 2891 int overlap = 0; 2892 struct list_head *tmp; 2893 2894 rcu_read_lock(); 2895 for_each_mddev(mddev, tmp) { 2896 struct md_rdev *rdev2; 2897 2898 rdev_for_each(rdev2, mddev) 2899 if (rdev->bdev == rdev2->bdev && 2900 rdev != rdev2 && 2901 overlaps(rdev->data_offset, rdev->sectors, 2902 rdev2->data_offset, 2903 rdev2->sectors)) { 2904 overlap = 1; 2905 break; 2906 } 2907 if (overlap) { 2908 mddev_put(mddev); 2909 break; 2910 } 2911 } 2912 rcu_read_unlock(); 2913 if (overlap) { 2914 /* Someone else could have slipped in a size 2915 * change here, but doing so is just silly.
2916 * We put oldsectors back because we *know* it is 2917 * safe, and trust userspace not to race with 2918 * itself 2919 */ 2920 rdev->sectors = oldsectors; 2921 return -EBUSY; 2922 } 2923 } 2924 return len; 2925 } 2926 2927 static struct rdev_sysfs_entry rdev_size = 2928 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2929 2930 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 2931 { 2932 unsigned long long recovery_start = rdev->recovery_offset; 2933 2934 if (test_bit(In_sync, &rdev->flags) || 2935 recovery_start == MaxSector) 2936 return sprintf(page, "none\n"); 2937 2938 return sprintf(page, "%llu\n", recovery_start); 2939 } 2940 2941 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 2942 { 2943 unsigned long long recovery_start; 2944 2945 if (cmd_match(buf, "none")) 2946 recovery_start = MaxSector; 2947 else if (kstrtoull(buf, 10, &recovery_start)) 2948 return -EINVAL; 2949 2950 if (rdev->mddev->pers && 2951 rdev->raid_disk >= 0) 2952 return -EBUSY; 2953 2954 rdev->recovery_offset = recovery_start; 2955 if (recovery_start == MaxSector) 2956 set_bit(In_sync, &rdev->flags); 2957 else 2958 clear_bit(In_sync, &rdev->flags); 2959 return len; 2960 } 2961 2962 static struct rdev_sysfs_entry rdev_recovery_start = 2963 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2964 2965 static ssize_t 2966 badblocks_show(struct badblocks *bb, char *page, int unack); 2967 static ssize_t 2968 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); 2969 2970 static ssize_t bb_show(struct md_rdev *rdev, char *page) 2971 { 2972 return badblocks_show(&rdev->badblocks, page, 0); 2973 } 2974 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 2975 { 2976 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 2977 /* Maybe that ack was all we needed */ 2978 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 2979 wake_up(&rdev->blocked_wait); 2980 return rv; 2981 } 2982 static struct rdev_sysfs_entry rdev_bad_blocks = 2983 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 2984 2985 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 2986 { 2987 return badblocks_show(&rdev->badblocks, page, 1); 2988 } 2989 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 2990 { 2991 return badblocks_store(&rdev->badblocks, page, len, 1); 2992 } 2993 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 2994 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 2995 2996 static struct attribute *rdev_default_attrs[] = { 2997 &rdev_state.attr, 2998 &rdev_errors.attr, 2999 &rdev_slot.attr, 3000 &rdev_offset.attr, 3001 &rdev_new_offset.attr, 3002 &rdev_size.attr, 3003 &rdev_recovery_start.attr, 3004 &rdev_bad_blocks.attr, 3005 &rdev_unack_bad_blocks.attr, 3006 NULL, 3007 }; 3008 static ssize_t 3009 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3010 { 3011 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3012 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3013 3014 if (!entry->show) 3015 return -EIO; 3016 if (!rdev->mddev) 3017 return -EBUSY; 3018 return entry->show(rdev, page); 3019 } 3020 3021 static ssize_t 3022 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3023 const char *page, size_t length) 3024 { 3025 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3026 struct md_rdev *rdev = 
container_of(kobj, struct md_rdev, kobj); 3027 ssize_t rv; 3028 struct mddev *mddev = rdev->mddev; 3029 3030 if (!entry->store) 3031 return -EIO; 3032 if (!capable(CAP_SYS_ADMIN)) 3033 return -EACCES; 3034 rv = mddev ? mddev_lock(mddev): -EBUSY; 3035 if (!rv) { 3036 if (rdev->mddev == NULL) 3037 rv = -EBUSY; 3038 else 3039 rv = entry->store(rdev, page, length); 3040 mddev_unlock(mddev); 3041 } 3042 return rv; 3043 } 3044 3045 static void rdev_free(struct kobject *ko) 3046 { 3047 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3048 kfree(rdev); 3049 } 3050 static const struct sysfs_ops rdev_sysfs_ops = { 3051 .show = rdev_attr_show, 3052 .store = rdev_attr_store, 3053 }; 3054 static struct kobj_type rdev_ktype = { 3055 .release = rdev_free, 3056 .sysfs_ops = &rdev_sysfs_ops, 3057 .default_attrs = rdev_default_attrs, 3058 }; 3059 3060 int md_rdev_init(struct md_rdev *rdev) 3061 { 3062 rdev->desc_nr = -1; 3063 rdev->saved_raid_disk = -1; 3064 rdev->raid_disk = -1; 3065 rdev->flags = 0; 3066 rdev->data_offset = 0; 3067 rdev->new_data_offset = 0; 3068 rdev->sb_events = 0; 3069 rdev->last_read_error.tv_sec = 0; 3070 rdev->last_read_error.tv_nsec = 0; 3071 rdev->sb_loaded = 0; 3072 rdev->bb_page = NULL; 3073 atomic_set(&rdev->nr_pending, 0); 3074 atomic_set(&rdev->read_errors, 0); 3075 atomic_set(&rdev->corrected_errors, 0); 3076 3077 INIT_LIST_HEAD(&rdev->same_set); 3078 init_waitqueue_head(&rdev->blocked_wait); 3079 3080 /* Add space to store bad block list. 3081 * This reserves the space even on arrays where it cannot 3082 * be used - I wonder if that matters 3083 */ 3084 rdev->badblocks.count = 0; 3085 rdev->badblocks.shift = -1; /* disabled until explicitly enabled */ 3086 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); 3087 seqlock_init(&rdev->badblocks.lock); 3088 if (rdev->badblocks.page == NULL) 3089 return -ENOMEM; 3090 3091 return 0; 3092 } 3093 EXPORT_SYMBOL_GPL(md_rdev_init); 3094 /* 3095 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3096 * 3097 * mark the device faulty if: 3098 * 3099 * - the device is nonexistent (zero size) 3100 * - the device has no valid superblock 3101 * 3102 * a faulty rdev _never_ has rdev->sb set. 3103 */ 3104 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3105 { 3106 char b[BDEVNAME_SIZE]; 3107 int err; 3108 struct md_rdev *rdev; 3109 sector_t size; 3110 3111 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3112 if (!rdev) { 3113 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 3114 return ERR_PTR(-ENOMEM); 3115 } 3116 3117 err = md_rdev_init(rdev); 3118 if (err) 3119 goto abort_free; 3120 err = alloc_disk_sb(rdev); 3121 if (err) 3122 goto abort_free; 3123 3124 err = lock_rdev(rdev, newdev, super_format == -2); 3125 if (err) 3126 goto abort_free; 3127 3128 kobject_init(&rdev->kobj, &rdev_ktype); 3129 3130 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3131 if (!size) { 3132 printk(KERN_WARNING 3133 "md: %s has zero or unknown size, marking faulty!\n", 3134 bdevname(rdev->bdev,b)); 3135 err = -EINVAL; 3136 goto abort_free; 3137 } 3138 3139 if (super_format >= 0) { 3140 err = super_types[super_format]. 
3141 load_super(rdev, NULL, super_minor); 3142 if (err == -EINVAL) { 3143 printk(KERN_WARNING 3144 "md: %s does not have a valid v%d.%d " 3145 "superblock, not importing!\n", 3146 bdevname(rdev->bdev,b), 3147 super_format, super_minor); 3148 goto abort_free; 3149 } 3150 if (err < 0) { 3151 printk(KERN_WARNING 3152 "md: could not read %s's sb, not importing!\n", 3153 bdevname(rdev->bdev,b)); 3154 goto abort_free; 3155 } 3156 } 3157 3158 return rdev; 3159 3160 abort_free: 3161 if (rdev->bdev) 3162 unlock_rdev(rdev); 3163 md_rdev_clear(rdev); 3164 kfree(rdev); 3165 return ERR_PTR(err); 3166 } 3167 3168 /* 3169 * Check a full RAID array for plausibility 3170 */ 3171 3172 static void analyze_sbs(struct mddev *mddev) 3173 { 3174 int i; 3175 struct md_rdev *rdev, *freshest, *tmp; 3176 char b[BDEVNAME_SIZE]; 3177 3178 freshest = NULL; 3179 rdev_for_each_safe(rdev, tmp, mddev) 3180 switch (super_types[mddev->major_version]. 3181 load_super(rdev, freshest, mddev->minor_version)) { 3182 case 1: 3183 freshest = rdev; 3184 break; 3185 case 0: 3186 break; 3187 default: 3188 printk( KERN_ERR \ 3189 "md: fatal superblock inconsistency in %s" 3190 " -- removing from array\n", 3191 bdevname(rdev->bdev,b)); 3192 md_kick_rdev_from_array(rdev); 3193 } 3194 3195 super_types[mddev->major_version]. 3196 validate_super(mddev, freshest); 3197 3198 i = 0; 3199 rdev_for_each_safe(rdev, tmp, mddev) { 3200 if (mddev->max_disks && 3201 (rdev->desc_nr >= mddev->max_disks || 3202 i > mddev->max_disks)) { 3203 printk(KERN_WARNING 3204 "md: %s: %s: only %d devices permitted\n", 3205 mdname(mddev), bdevname(rdev->bdev, b), 3206 mddev->max_disks); 3207 md_kick_rdev_from_array(rdev); 3208 continue; 3209 } 3210 if (rdev != freshest) { 3211 if (super_types[mddev->major_version]. 3212 validate_super(mddev, rdev)) { 3213 printk(KERN_WARNING "md: kicking non-fresh %s" 3214 " from array!\n", 3215 bdevname(rdev->bdev,b)); 3216 md_kick_rdev_from_array(rdev); 3217 continue; 3218 } 3219 /* No device should have a Candidate flag 3220 * when reading devices 3221 */ 3222 if (test_bit(Candidate, &rdev->flags)) { 3223 pr_info("md: kicking Cluster Candidate %s from array!\n", 3224 bdevname(rdev->bdev, b)); 3225 md_kick_rdev_from_array(rdev); 3226 } 3227 } 3228 if (mddev->level == LEVEL_MULTIPATH) { 3229 rdev->desc_nr = i++; 3230 rdev->raid_disk = rdev->desc_nr; 3231 set_bit(In_sync, &rdev->flags); 3232 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { 3233 rdev->raid_disk = -1; 3234 clear_bit(In_sync, &rdev->flags); 3235 } 3236 } 3237 } 3238 3239 /* Read a fixed-point number. 3240 * Numbers in sysfs attributes should be in "standard" units where 3241 * possible, so time should be in seconds. 3242 * However we internally use a a much smaller unit such as 3243 * milliseconds or jiffies. 3244 * This function takes a decimal number with a possible fractional 3245 * component, and produces an integer which is the result of 3246 * multiplying that number by 10^'scale'. 3247 * all without any floating-point arithmetic. 3248 */ 3249 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 3250 { 3251 unsigned long result = 0; 3252 long decimals = -1; 3253 while (isdigit(*cp) || (*cp == '.' 
&& decimals < 0)) { 3254 if (*cp == '.') 3255 decimals = 0; 3256 else if (decimals < scale) { 3257 unsigned int value; 3258 value = *cp - '0'; 3259 result = result * 10 + value; 3260 if (decimals >= 0) 3261 decimals++; 3262 } 3263 cp++; 3264 } 3265 if (*cp == '\n') 3266 cp++; 3267 if (*cp) 3268 return -EINVAL; 3269 if (decimals < 0) 3270 decimals = 0; 3271 while (decimals < scale) { 3272 result *= 10; 3273 decimals ++; 3274 } 3275 *res = result; 3276 return 0; 3277 } 3278 3279 static void md_safemode_timeout(unsigned long data); 3280 3281 static ssize_t 3282 safe_delay_show(struct mddev *mddev, char *page) 3283 { 3284 int msec = (mddev->safemode_delay*1000)/HZ; 3285 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3286 } 3287 static ssize_t 3288 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3289 { 3290 unsigned long msec; 3291 3292 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3293 return -EINVAL; 3294 if (msec == 0) 3295 mddev->safemode_delay = 0; 3296 else { 3297 unsigned long old_delay = mddev->safemode_delay; 3298 unsigned long new_delay = (msec*HZ)/1000; 3299 3300 if (new_delay == 0) 3301 new_delay = 1; 3302 mddev->safemode_delay = new_delay; 3303 if (new_delay < old_delay || old_delay == 0) 3304 mod_timer(&mddev->safemode_timer, jiffies+1); 3305 } 3306 return len; 3307 } 3308 static struct md_sysfs_entry md_safe_delay = 3309 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3310 3311 static ssize_t 3312 level_show(struct mddev *mddev, char *page) 3313 { 3314 struct md_personality *p; 3315 int ret; 3316 spin_lock(&mddev->lock); 3317 p = mddev->pers; 3318 if (p) 3319 ret = sprintf(page, "%s\n", p->name); 3320 else if (mddev->clevel[0]) 3321 ret = sprintf(page, "%s\n", mddev->clevel); 3322 else if (mddev->level != LEVEL_NONE) 3323 ret = sprintf(page, "%d\n", mddev->level); 3324 else 3325 ret = 0; 3326 spin_unlock(&mddev->lock); 3327 return ret; 3328 } 3329 3330 static ssize_t 3331 level_store(struct mddev *mddev, const char *buf, size_t len) 3332 { 3333 char clevel[16]; 3334 ssize_t rv; 3335 size_t slen = len; 3336 struct md_personality *pers, *oldpers; 3337 long level; 3338 void *priv, *oldpriv; 3339 struct md_rdev *rdev; 3340 3341 if (slen == 0 || slen >= sizeof(clevel)) 3342 return -EINVAL; 3343 3344 rv = mddev_lock(mddev); 3345 if (rv) 3346 return rv; 3347 3348 if (mddev->pers == NULL) { 3349 strncpy(mddev->clevel, buf, slen); 3350 if (mddev->clevel[slen-1] == '\n') 3351 slen--; 3352 mddev->clevel[slen] = 0; 3353 mddev->level = LEVEL_NONE; 3354 rv = len; 3355 goto out_unlock; 3356 } 3357 rv = -EROFS; 3358 if (mddev->ro) 3359 goto out_unlock; 3360 3361 /* request to change the personality. Need to ensure: 3362 * - array is not engaged in resync/recovery/reshape 3363 * - old personality can be suspended 3364 * - new personality will access other array. 
3365 */ 3366 3367 rv = -EBUSY; 3368 if (mddev->sync_thread || 3369 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3370 mddev->reshape_position != MaxSector || 3371 mddev->sysfs_active) 3372 goto out_unlock; 3373 3374 rv = -EINVAL; 3375 if (!mddev->pers->quiesce) { 3376 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3377 mdname(mddev), mddev->pers->name); 3378 goto out_unlock; 3379 } 3380 3381 /* Now find the new personality */ 3382 strncpy(clevel, buf, slen); 3383 if (clevel[slen-1] == '\n') 3384 slen--; 3385 clevel[slen] = 0; 3386 if (kstrtol(clevel, 10, &level)) 3387 level = LEVEL_NONE; 3388 3389 if (request_module("md-%s", clevel) != 0) 3390 request_module("md-level-%s", clevel); 3391 spin_lock(&pers_lock); 3392 pers = find_pers(level, clevel); 3393 if (!pers || !try_module_get(pers->owner)) { 3394 spin_unlock(&pers_lock); 3395 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3396 rv = -EINVAL; 3397 goto out_unlock; 3398 } 3399 spin_unlock(&pers_lock); 3400 3401 if (pers == mddev->pers) { 3402 /* Nothing to do! */ 3403 module_put(pers->owner); 3404 rv = len; 3405 goto out_unlock; 3406 } 3407 if (!pers->takeover) { 3408 module_put(pers->owner); 3409 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3410 mdname(mddev), clevel); 3411 rv = -EINVAL; 3412 goto out_unlock; 3413 } 3414 3415 rdev_for_each(rdev, mddev) 3416 rdev->new_raid_disk = rdev->raid_disk; 3417 3418 /* ->takeover must set new_* and/or delta_disks 3419 * if it succeeds, and may set them when it fails. 3420 */ 3421 priv = pers->takeover(mddev); 3422 if (IS_ERR(priv)) { 3423 mddev->new_level = mddev->level; 3424 mddev->new_layout = mddev->layout; 3425 mddev->new_chunk_sectors = mddev->chunk_sectors; 3426 mddev->raid_disks -= mddev->delta_disks; 3427 mddev->delta_disks = 0; 3428 mddev->reshape_backwards = 0; 3429 module_put(pers->owner); 3430 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3431 mdname(mddev), clevel); 3432 rv = PTR_ERR(priv); 3433 goto out_unlock; 3434 } 3435 3436 /* Looks like we have a winner */ 3437 mddev_suspend(mddev); 3438 mddev_detach(mddev); 3439 3440 spin_lock(&mddev->lock); 3441 oldpers = mddev->pers; 3442 oldpriv = mddev->private; 3443 mddev->pers = pers; 3444 mddev->private = priv; 3445 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3446 mddev->level = mddev->new_level; 3447 mddev->layout = mddev->new_layout; 3448 mddev->chunk_sectors = mddev->new_chunk_sectors; 3449 mddev->delta_disks = 0; 3450 mddev->reshape_backwards = 0; 3451 mddev->degraded = 0; 3452 spin_unlock(&mddev->lock); 3453 3454 if (oldpers->sync_request == NULL && 3455 mddev->external) { 3456 /* We are converting from a no-redundancy array 3457 * to a redundancy array and metadata is managed 3458 * externally so we need to be sure that writes 3459 * won't block due to a need to transition 3460 * clean->dirty 3461 * until external management is started. 
3462 */ 3463 mddev->in_sync = 0; 3464 mddev->safemode_delay = 0; 3465 mddev->safemode = 0; 3466 } 3467 3468 oldpers->free(mddev, oldpriv); 3469 3470 if (oldpers->sync_request == NULL && 3471 pers->sync_request != NULL) { 3472 /* need to add the md_redundancy_group */ 3473 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3474 printk(KERN_WARNING 3475 "md: cannot register extra attributes for %s\n", 3476 mdname(mddev)); 3477 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 3478 } 3479 if (oldpers->sync_request != NULL && 3480 pers->sync_request == NULL) { 3481 /* need to remove the md_redundancy_group */ 3482 if (mddev->to_remove == NULL) 3483 mddev->to_remove = &md_redundancy_group; 3484 } 3485 3486 rdev_for_each(rdev, mddev) { 3487 if (rdev->raid_disk < 0) 3488 continue; 3489 if (rdev->new_raid_disk >= mddev->raid_disks) 3490 rdev->new_raid_disk = -1; 3491 if (rdev->new_raid_disk == rdev->raid_disk) 3492 continue; 3493 sysfs_unlink_rdev(mddev, rdev); 3494 } 3495 rdev_for_each(rdev, mddev) { 3496 if (rdev->raid_disk < 0) 3497 continue; 3498 if (rdev->new_raid_disk == rdev->raid_disk) 3499 continue; 3500 rdev->raid_disk = rdev->new_raid_disk; 3501 if (rdev->raid_disk < 0) 3502 clear_bit(In_sync, &rdev->flags); 3503 else { 3504 if (sysfs_link_rdev(mddev, rdev)) 3505 printk(KERN_WARNING "md: cannot register rd%d" 3506 " for %s after level change\n", 3507 rdev->raid_disk, mdname(mddev)); 3508 } 3509 } 3510 3511 if (pers->sync_request == NULL) { 3512 /* this is now an array without redundancy, so 3513 * it must always be in_sync 3514 */ 3515 mddev->in_sync = 1; 3516 del_timer_sync(&mddev->safemode_timer); 3517 } 3518 blk_set_stacking_limits(&mddev->queue->limits); 3519 pers->run(mddev); 3520 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3521 mddev_resume(mddev); 3522 if (!mddev->thread) 3523 md_update_sb(mddev, 1); 3524 sysfs_notify(&mddev->kobj, NULL, "level"); 3525 md_new_event(mddev); 3526 rv = len; 3527 out_unlock: 3528 mddev_unlock(mddev); 3529 return rv; 3530 } 3531 3532 static struct md_sysfs_entry md_level = 3533 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3534 3535 static ssize_t 3536 layout_show(struct mddev *mddev, char *page) 3537 { 3538 /* just a number, not meaningful for all levels */ 3539 if (mddev->reshape_position != MaxSector && 3540 mddev->layout != mddev->new_layout) 3541 return sprintf(page, "%d (%d)\n", 3542 mddev->new_layout, mddev->layout); 3543 return sprintf(page, "%d\n", mddev->layout); 3544 } 3545 3546 static ssize_t 3547 layout_store(struct mddev *mddev, const char *buf, size_t len) 3548 { 3549 unsigned int n; 3550 int err; 3551 3552 err = kstrtouint(buf, 10, &n); 3553 if (err < 0) 3554 return err; 3555 err = mddev_lock(mddev); 3556 if (err) 3557 return err; 3558 3559 if (mddev->pers) { 3560 if (mddev->pers->check_reshape == NULL) 3561 err = -EBUSY; 3562 else if (mddev->ro) 3563 err = -EROFS; 3564 else { 3565 mddev->new_layout = n; 3566 err = mddev->pers->check_reshape(mddev); 3567 if (err) 3568 mddev->new_layout = mddev->layout; 3569 } 3570 } else { 3571 mddev->new_layout = n; 3572 if (mddev->reshape_position == MaxSector) 3573 mddev->layout = n; 3574 } 3575 mddev_unlock(mddev); 3576 return err ?: len; 3577 } 3578 static struct md_sysfs_entry md_layout = 3579 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3580 3581 static ssize_t 3582 raid_disks_show(struct mddev *mddev, char *page) 3583 { 3584 if (mddev->raid_disks == 0) 3585 return 0; 3586 if (mddev->reshape_position != MaxSector && 3587 mddev->delta_disks != 
0) 3588 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3589 mddev->raid_disks - mddev->delta_disks); 3590 return sprintf(page, "%d\n", mddev->raid_disks); 3591 } 3592 3593 static int update_raid_disks(struct mddev *mddev, int raid_disks); 3594 3595 static ssize_t 3596 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3597 { 3598 unsigned int n; 3599 int err; 3600 3601 err = kstrtouint(buf, 10, &n); 3602 if (err < 0) 3603 return err; 3604 3605 err = mddev_lock(mddev); 3606 if (err) 3607 return err; 3608 if (mddev->pers) 3609 err = update_raid_disks(mddev, n); 3610 else if (mddev->reshape_position != MaxSector) { 3611 struct md_rdev *rdev; 3612 int olddisks = mddev->raid_disks - mddev->delta_disks; 3613 3614 err = -EINVAL; 3615 rdev_for_each(rdev, mddev) { 3616 if (olddisks < n && 3617 rdev->data_offset < rdev->new_data_offset) 3618 goto out_unlock; 3619 if (olddisks > n && 3620 rdev->data_offset > rdev->new_data_offset) 3621 goto out_unlock; 3622 } 3623 err = 0; 3624 mddev->delta_disks = n - olddisks; 3625 mddev->raid_disks = n; 3626 mddev->reshape_backwards = (mddev->delta_disks < 0); 3627 } else 3628 mddev->raid_disks = n; 3629 out_unlock: 3630 mddev_unlock(mddev); 3631 return err ? err : len; 3632 } 3633 static struct md_sysfs_entry md_raid_disks = 3634 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3635 3636 static ssize_t 3637 chunk_size_show(struct mddev *mddev, char *page) 3638 { 3639 if (mddev->reshape_position != MaxSector && 3640 mddev->chunk_sectors != mddev->new_chunk_sectors) 3641 return sprintf(page, "%d (%d)\n", 3642 mddev->new_chunk_sectors << 9, 3643 mddev->chunk_sectors << 9); 3644 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3645 } 3646 3647 static ssize_t 3648 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 3649 { 3650 unsigned long n; 3651 int err; 3652 3653 err = kstrtoul(buf, 10, &n); 3654 if (err < 0) 3655 return err; 3656 3657 err = mddev_lock(mddev); 3658 if (err) 3659 return err; 3660 if (mddev->pers) { 3661 if (mddev->pers->check_reshape == NULL) 3662 err = -EBUSY; 3663 else if (mddev->ro) 3664 err = -EROFS; 3665 else { 3666 mddev->new_chunk_sectors = n >> 9; 3667 err = mddev->pers->check_reshape(mddev); 3668 if (err) 3669 mddev->new_chunk_sectors = mddev->chunk_sectors; 3670 } 3671 } else { 3672 mddev->new_chunk_sectors = n >> 9; 3673 if (mddev->reshape_position == MaxSector) 3674 mddev->chunk_sectors = n >> 9; 3675 } 3676 mddev_unlock(mddev); 3677 return err ?: len; 3678 } 3679 static struct md_sysfs_entry md_chunk_size = 3680 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3681 3682 static ssize_t 3683 resync_start_show(struct mddev *mddev, char *page) 3684 { 3685 if (mddev->recovery_cp == MaxSector) 3686 return sprintf(page, "none\n"); 3687 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 3688 } 3689 3690 static ssize_t 3691 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 3692 { 3693 unsigned long long n; 3694 int err; 3695 3696 if (cmd_match(buf, "none")) 3697 n = MaxSector; 3698 else { 3699 err = kstrtoull(buf, 10, &n); 3700 if (err < 0) 3701 return err; 3702 if (n != (sector_t)n) 3703 return -EINVAL; 3704 } 3705 3706 err = mddev_lock(mddev); 3707 if (err) 3708 return err; 3709 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3710 err = -EBUSY; 3711 3712 if (!err) { 3713 mddev->recovery_cp = n; 3714 if (mddev->pers) 3715 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3716 } 3717 mddev_unlock(mddev); 
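/*
 * Illustrative shell usage (device name assumed):
 *   echo 0    > /sys/block/md0/md/resync_start   # resync from sector 0
 *   echo none > /sys/block/md0/md/resync_start   # mark resync complete
 * On an active array these writes are refused with -EBUSY unless
 * MD_RECOVERY_FROZEN has been set (e.g. by writing "frozen" to
 * sync_action).
 */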
3718 return err ?: len; 3719 } 3720 static struct md_sysfs_entry md_resync_start = 3721 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 3722 resync_start_show, resync_start_store); 3723 3724 /* 3725 * The array state can be: 3726 * 3727 * clear 3728 * No devices, no size, no level 3729 * Equivalent to STOP_ARRAY ioctl 3730 * inactive 3731 * May have some settings, but array is not active 3732 * all IO results in error 3733 * When written, doesn't tear down array, but just stops it 3734 * suspended (not supported yet) 3735 * All IO requests will block. The array can be reconfigured. 3736 * Writing this, if accepted, will block until array is quiescent 3737 * readonly 3738 * no resync can happen. no superblocks get written. 3739 * write requests fail 3740 * read-auto 3741 * like readonly, but behaves like 'clean' on a write request. 3742 * 3743 * clean - no pending writes, but otherwise active. 3744 * When written to inactive array, starts without resync 3745 * If a write request arrives then 3746 * if metadata is known, mark 'dirty' and switch to 'active'. 3747 * if not known, block and switch to write-pending 3748 * If written to an active array that has pending writes, then fails. 3749 * active 3750 * fully active: IO and resync can be happening. 3751 * When written to inactive array, starts with resync 3752 * 3753 * write-pending 3754 * clean, but writes are blocked waiting for 'active' to be written. 3755 * 3756 * active-idle 3757 * like active, but no writes have been seen for a while (100msec). 3758 * 3759 */ 3760 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 3761 write_pending, active_idle, bad_word}; 3762 static char *array_states[] = { 3763 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 3764 "write-pending", "active-idle", NULL }; 3765 3766 static int match_word(const char *word, char **list) 3767 { 3768 int n; 3769 for (n=0; list[n]; n++) 3770 if (cmd_match(word, list[n])) 3771 break; 3772 return n; 3773 } 3774 3775 static ssize_t 3776 array_state_show(struct mddev *mddev, char *page) 3777 { 3778 enum array_state st = inactive; 3779 3780 if (mddev->pers) 3781 switch(mddev->ro) { 3782 case 1: 3783 st = readonly; 3784 break; 3785 case 2: 3786 st = read_auto; 3787 break; 3788 case 0: 3789 if (mddev->in_sync) 3790 st = clean; 3791 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 3792 st = write_pending; 3793 else if (mddev->safemode) 3794 st = active_idle; 3795 else 3796 st = active; 3797 } 3798 else { 3799 if (list_empty(&mddev->disks) && 3800 mddev->raid_disks == 0 && 3801 mddev->dev_sectors == 0) 3802 st = clear; 3803 else 3804 st = inactive; 3805 } 3806 return sprintf(page, "%s\n", array_states[st]); 3807 } 3808 3809 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 3810 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 3811 static int do_md_run(struct mddev *mddev); 3812 static int restart_array(struct mddev *mddev); 3813 3814 static ssize_t 3815 array_state_store(struct mddev *mddev, const char *buf, size_t len) 3816 { 3817 int err; 3818 enum array_state st = match_word(buf, array_states); 3819 3820 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 3821 /* don't take reconfig_mutex when toggling between 3822 * clean and active 3823 */ 3824 spin_lock(&mddev->lock); 3825 if (st == active) { 3826 restart_array(mddev); 3827 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3828 wake_up(&mddev->sb_wait); 3829 err = 0; 3830 } else /* st == clean 
*/ { 3831 restart_array(mddev); 3832 if (atomic_read(&mddev->writes_pending) == 0) { 3833 if (mddev->in_sync == 0) { 3834 mddev->in_sync = 1; 3835 if (mddev->safemode == 1) 3836 mddev->safemode = 0; 3837 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3838 } 3839 err = 0; 3840 } else 3841 err = -EBUSY; 3842 } 3843 spin_unlock(&mddev->lock); 3844 return err ?: len; 3845 } 3846 err = mddev_lock(mddev); 3847 if (err) 3848 return err; 3849 err = -EINVAL; 3850 switch(st) { 3851 case bad_word: 3852 break; 3853 case clear: 3854 /* stopping an active array */ 3855 err = do_md_stop(mddev, 0, NULL); 3856 break; 3857 case inactive: 3858 /* stopping an active array */ 3859 if (mddev->pers) 3860 err = do_md_stop(mddev, 2, NULL); 3861 else 3862 err = 0; /* already inactive */ 3863 break; 3864 case suspended: 3865 break; /* not supported yet */ 3866 case readonly: 3867 if (mddev->pers) 3868 err = md_set_readonly(mddev, NULL); 3869 else { 3870 mddev->ro = 1; 3871 set_disk_ro(mddev->gendisk, 1); 3872 err = do_md_run(mddev); 3873 } 3874 break; 3875 case read_auto: 3876 if (mddev->pers) { 3877 if (mddev->ro == 0) 3878 err = md_set_readonly(mddev, NULL); 3879 else if (mddev->ro == 1) 3880 err = restart_array(mddev); 3881 if (err == 0) { 3882 mddev->ro = 2; 3883 set_disk_ro(mddev->gendisk, 0); 3884 } 3885 } else { 3886 mddev->ro = 2; 3887 err = do_md_run(mddev); 3888 } 3889 break; 3890 case clean: 3891 if (mddev->pers) { 3892 restart_array(mddev); 3893 spin_lock(&mddev->lock); 3894 if (atomic_read(&mddev->writes_pending) == 0) { 3895 if (mddev->in_sync == 0) { 3896 mddev->in_sync = 1; 3897 if (mddev->safemode == 1) 3898 mddev->safemode = 0; 3899 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3900 } 3901 err = 0; 3902 } else 3903 err = -EBUSY; 3904 spin_unlock(&mddev->lock); 3905 } else 3906 err = -EINVAL; 3907 break; 3908 case active: 3909 if (mddev->pers) { 3910 restart_array(mddev); 3911 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3912 wake_up(&mddev->sb_wait); 3913 err = 0; 3914 } else { 3915 mddev->ro = 0; 3916 set_disk_ro(mddev->gendisk, 0); 3917 err = do_md_run(mddev); 3918 } 3919 break; 3920 case write_pending: 3921 case active_idle: 3922 /* these cannot be set */ 3923 break; 3924 } 3925 3926 if (!err) { 3927 if (mddev->hold_active == UNTIL_IOCTL) 3928 mddev->hold_active = 0; 3929 sysfs_notify_dirent_safe(mddev->sysfs_state); 3930 } 3931 mddev_unlock(mddev); 3932 return err ?: len; 3933 } 3934 static struct md_sysfs_entry md_array_state = 3935 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3936 3937 static ssize_t 3938 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3939 return sprintf(page, "%d\n", 3940 atomic_read(&mddev->max_corr_read_errors)); 3941 } 3942 3943 static ssize_t 3944 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 3945 { 3946 unsigned int n; 3947 int rv; 3948 3949 rv = kstrtouint(buf, 10, &n); 3950 if (rv < 0) 3951 return rv; 3952 atomic_set(&mddev->max_corr_read_errors, n); 3953 return len; 3954 } 3955 3956 static struct md_sysfs_entry max_corr_read_errors = 3957 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 3958 max_corrected_read_errors_store); 3959 3960 static ssize_t 3961 null_show(struct mddev *mddev, char *page) 3962 { 3963 return -EINVAL; 3964 } 3965 3966 static ssize_t 3967 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 3968 { 3969 /* buf must be %d:%d\n? giving major and minor numbers */ 3970 /* The new device is added to the array. 
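 * (Illustrative write, example numbers only: "echo 8:16 > md/new_dev"
 * asks for the block device with major 8, minor 16 to be added.)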
3971 * If the array has a persistent superblock, we read the 3972 * superblock to initialise info and check validity. 3973 * Otherwise, only checking done is that in bind_rdev_to_array, 3974 * which mainly checks size. 3975 */ 3976 char *e; 3977 int major = simple_strtoul(buf, &e, 10); 3978 int minor; 3979 dev_t dev; 3980 struct md_rdev *rdev; 3981 int err; 3982 3983 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3984 return -EINVAL; 3985 minor = simple_strtoul(e+1, &e, 10); 3986 if (*e && *e != '\n') 3987 return -EINVAL; 3988 dev = MKDEV(major, minor); 3989 if (major != MAJOR(dev) || 3990 minor != MINOR(dev)) 3991 return -EOVERFLOW; 3992 3993 flush_workqueue(md_misc_wq); 3994 3995 err = mddev_lock(mddev); 3996 if (err) 3997 return err; 3998 if (mddev->persistent) { 3999 rdev = md_import_device(dev, mddev->major_version, 4000 mddev->minor_version); 4001 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 4002 struct md_rdev *rdev0 4003 = list_entry(mddev->disks.next, 4004 struct md_rdev, same_set); 4005 err = super_types[mddev->major_version] 4006 .load_super(rdev, rdev0, mddev->minor_version); 4007 if (err < 0) 4008 goto out; 4009 } 4010 } else if (mddev->external) 4011 rdev = md_import_device(dev, -2, -1); 4012 else 4013 rdev = md_import_device(dev, -1, -1); 4014 4015 if (IS_ERR(rdev)) { 4016 mddev_unlock(mddev); 4017 return PTR_ERR(rdev); 4018 } 4019 err = bind_rdev_to_array(rdev, mddev); 4020 out: 4021 if (err) 4022 export_rdev(rdev); 4023 mddev_unlock(mddev); 4024 return err ? err : len; 4025 } 4026 4027 static struct md_sysfs_entry md_new_device = 4028 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4029 4030 static ssize_t 4031 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4032 { 4033 char *end; 4034 unsigned long chunk, end_chunk; 4035 int err; 4036 4037 err = mddev_lock(mddev); 4038 if (err) 4039 return err; 4040 if (!mddev->bitmap) 4041 goto out; 4042 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4043 while (*buf) { 4044 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4045 if (buf == end) break; 4046 if (*end == '-') { /* range */ 4047 buf = end + 1; 4048 end_chunk = simple_strtoul(buf, &end, 0); 4049 if (buf == end) break; 4050 } 4051 if (*end && !isspace(*end)) break; 4052 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4053 buf = skip_spaces(end); 4054 } 4055 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4056 out: 4057 mddev_unlock(mddev); 4058 return len; 4059 } 4060 4061 static struct md_sysfs_entry md_bitmap = 4062 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4063 4064 static ssize_t 4065 size_show(struct mddev *mddev, char *page) 4066 { 4067 return sprintf(page, "%llu\n", 4068 (unsigned long long)mddev->dev_sectors / 2); 4069 } 4070 4071 static int update_size(struct mddev *mddev, sector_t num_sectors); 4072 4073 static ssize_t 4074 size_store(struct mddev *mddev, const char *buf, size_t len) 4075 { 4076 /* If array is inactive, we can reduce the component size, but 4077 * not increase it (except from 0). 
4078 * If array is active, we can try an on-line resize 4079 */ 4080 sector_t sectors; 4081 int err = strict_blocks_to_sectors(buf, &sectors); 4082 4083 if (err < 0) 4084 return err; 4085 err = mddev_lock(mddev); 4086 if (err) 4087 return err; 4088 if (mddev->pers) { 4089 if (mddev_is_clustered(mddev)) 4090 md_cluster_ops->metadata_update_start(mddev); 4091 err = update_size(mddev, sectors); 4092 md_update_sb(mddev, 1); 4093 if (mddev_is_clustered(mddev)) 4094 md_cluster_ops->metadata_update_finish(mddev); 4095 } else { 4096 if (mddev->dev_sectors == 0 || 4097 mddev->dev_sectors > sectors) 4098 mddev->dev_sectors = sectors; 4099 else 4100 err = -ENOSPC; 4101 } 4102 mddev_unlock(mddev); 4103 return err ? err : len; 4104 } 4105 4106 static struct md_sysfs_entry md_size = 4107 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 4108 4109 /* Metadata version. 4110 * This is one of 4111 * 'none' for arrays with no metadata (good luck...) 4112 * 'external' for arrays with externally managed metadata, 4113 * or N.M for internally known formats 4114 */ 4115 static ssize_t 4116 metadata_show(struct mddev *mddev, char *page) 4117 { 4118 if (mddev->persistent) 4119 return sprintf(page, "%d.%d\n", 4120 mddev->major_version, mddev->minor_version); 4121 else if (mddev->external) 4122 return sprintf(page, "external:%s\n", mddev->metadata_type); 4123 else 4124 return sprintf(page, "none\n"); 4125 } 4126 4127 static ssize_t 4128 metadata_store(struct mddev *mddev, const char *buf, size_t len) 4129 { 4130 int major, minor; 4131 char *e; 4132 int err; 4133 /* Changing the details of 'external' metadata is 4134 * always permitted. Otherwise there must be 4135 * no devices attached to the array. 4136 */ 4137 4138 err = mddev_lock(mddev); 4139 if (err) 4140 return err; 4141 err = -EBUSY; 4142 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4143 ; 4144 else if (!list_empty(&mddev->disks)) 4145 goto out_unlock; 4146 4147 err = 0; 4148 if (cmd_match(buf, "none")) { 4149 mddev->persistent = 0; 4150 mddev->external = 0; 4151 mddev->major_version = 0; 4152 mddev->minor_version = 90; 4153 goto out_unlock; 4154 } 4155 if (strncmp(buf, "external:", 9) == 0) { 4156 size_t namelen = len-9; 4157 if (namelen >= sizeof(mddev->metadata_type)) 4158 namelen = sizeof(mddev->metadata_type)-1; 4159 strncpy(mddev->metadata_type, buf+9, namelen); 4160 mddev->metadata_type[namelen] = 0; 4161 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4162 mddev->metadata_type[--namelen] = 0; 4163 mddev->persistent = 0; 4164 mddev->external = 1; 4165 mddev->major_version = 0; 4166 mddev->minor_version = 90; 4167 goto out_unlock; 4168 } 4169 major = simple_strtoul(buf, &e, 10); 4170 err = -EINVAL; 4171 if (e==buf || *e != '.') 4172 goto out_unlock; 4173 buf = e+1; 4174 minor = simple_strtoul(buf, &e, 10); 4175 if (e==buf || (*e && *e != '\n') ) 4176 goto out_unlock; 4177 err = -ENOENT; 4178 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4179 goto out_unlock; 4180 mddev->major_version = major; 4181 mddev->minor_version = minor; 4182 mddev->persistent = 1; 4183 mddev->external = 0; 4184 err = 0; 4185 out_unlock: 4186 mddev_unlock(mddev); 4187 return err ?: len; 4188 } 4189 4190 static struct md_sysfs_entry md_metadata = 4191 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4192 4193 static ssize_t 4194 action_show(struct mddev *mddev, char *page) 4195 { 4196 char *type = "idle"; 4197 unsigned long recovery = mddev->recovery; 4198 if (test_bit(MD_RECOVERY_FROZEN,
&recovery)) 4199 type = "frozen"; 4200 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4201 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4202 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4203 type = "reshape"; 4204 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4205 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4206 type = "resync"; 4207 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4208 type = "check"; 4209 else 4210 type = "repair"; 4211 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4212 type = "recover"; 4213 } 4214 return sprintf(page, "%s\n", type); 4215 } 4216 4217 static ssize_t 4218 action_store(struct mddev *mddev, const char *page, size_t len) 4219 { 4220 if (!mddev->pers || !mddev->pers->sync_request) 4221 return -EINVAL; 4222 4223 4224 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4225 if (cmd_match(page, "frozen")) 4226 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4227 else 4228 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4229 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4230 mddev_lock(mddev) == 0) { 4231 flush_workqueue(md_misc_wq); 4232 if (mddev->sync_thread) { 4233 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4234 md_reap_sync_thread(mddev); 4235 } 4236 mddev_unlock(mddev); 4237 } 4238 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4239 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4240 return -EBUSY; 4241 else if (cmd_match(page, "resync")) 4242 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4243 else if (cmd_match(page, "recover")) { 4244 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4245 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4246 } else if (cmd_match(page, "reshape")) { 4247 int err; 4248 if (mddev->pers->start_reshape == NULL) 4249 return -EINVAL; 4250 err = mddev_lock(mddev); 4251 if (!err) { 4252 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4253 err = mddev->pers->start_reshape(mddev); 4254 mddev_unlock(mddev); 4255 } 4256 if (err) 4257 return err; 4258 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4259 } else { 4260 if (cmd_match(page, "check")) 4261 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4262 else if (!cmd_match(page, "repair")) 4263 return -EINVAL; 4264 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4265 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4266 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4267 } 4268 if (mddev->ro == 2) { 4269 /* A write to sync_action is enough to justify 4270 * canceling read-auto mode 4271 */ 4272 mddev->ro = 0; 4273 md_wakeup_thread(mddev->sync_thread); 4274 } 4275 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4276 md_wakeup_thread(mddev->thread); 4277 sysfs_notify_dirent_safe(mddev->sysfs_action); 4278 return len; 4279 } 4280 4281 static struct md_sysfs_entry md_scan_mode = 4282 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4283 4284 static ssize_t 4285 last_sync_action_show(struct mddev *mddev, char *page) 4286 { 4287 return sprintf(page, "%s\n", mddev->last_sync_action); 4288 } 4289 4290 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4291 4292 static ssize_t 4293 mismatch_cnt_show(struct mddev *mddev, char *page) 4294 { 4295 return sprintf(page, "%llu\n", 4296 (unsigned long long) 4297 atomic64_read(&mddev->resync_mismatches)); 4298 } 4299 4300 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4301 4302 static ssize_t 4303 sync_min_show(struct mddev *mddev, char *page) 4304 { 4305 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4306 
mddev->sync_speed_min ? "local": "system"); 4307 } 4308 4309 static ssize_t 4310 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4311 { 4312 unsigned int min; 4313 int rv; 4314 4315 if (strncmp(buf, "system", 6)==0) { 4316 min = 0; 4317 } else { 4318 rv = kstrtouint(buf, 10, &min); 4319 if (rv < 0) 4320 return rv; 4321 if (min == 0) 4322 return -EINVAL; 4323 } 4324 mddev->sync_speed_min = min; 4325 return len; 4326 } 4327 4328 static struct md_sysfs_entry md_sync_min = 4329 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4330 4331 static ssize_t 4332 sync_max_show(struct mddev *mddev, char *page) 4333 { 4334 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4335 mddev->sync_speed_max ? "local": "system"); 4336 } 4337 4338 static ssize_t 4339 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4340 { 4341 unsigned int max; 4342 int rv; 4343 4344 if (strncmp(buf, "system", 6)==0) { 4345 max = 0; 4346 } else { 4347 rv = kstrtouint(buf, 10, &max); 4348 if (rv < 0) 4349 return rv; 4350 if (max == 0) 4351 return -EINVAL; 4352 } 4353 mddev->sync_speed_max = max; 4354 return len; 4355 } 4356 4357 static struct md_sysfs_entry md_sync_max = 4358 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4359 4360 static ssize_t 4361 degraded_show(struct mddev *mddev, char *page) 4362 { 4363 return sprintf(page, "%d\n", mddev->degraded); 4364 } 4365 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4366 4367 static ssize_t 4368 sync_force_parallel_show(struct mddev *mddev, char *page) 4369 { 4370 return sprintf(page, "%d\n", mddev->parallel_resync); 4371 } 4372 4373 static ssize_t 4374 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4375 { 4376 long n; 4377 4378 if (kstrtol(buf, 10, &n)) 4379 return -EINVAL; 4380 4381 if (n != 0 && n != 1) 4382 return -EINVAL; 4383 4384 mddev->parallel_resync = n; 4385 4386 if (mddev->sync_thread) 4387 wake_up(&resync_wait); 4388 4389 return len; 4390 } 4391 4392 /* force parallel resync, even with shared block devices */ 4393 static struct md_sysfs_entry md_sync_force_parallel = 4394 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4395 sync_force_parallel_show, sync_force_parallel_store); 4396 4397 static ssize_t 4398 sync_speed_show(struct mddev *mddev, char *page) 4399 { 4400 unsigned long resync, dt, db; 4401 if (mddev->curr_resync == 0) 4402 return sprintf(page, "none\n"); 4403 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4404 dt = (jiffies - mddev->resync_mark) / HZ; 4405 if (!dt) dt++; 4406 db = resync - mddev->resync_mark_cnt; 4407 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4408 } 4409 4410 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4411 4412 static ssize_t 4413 sync_completed_show(struct mddev *mddev, char *page) 4414 { 4415 unsigned long long max_sectors, resync; 4416 4417 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4418 return sprintf(page, "none\n"); 4419 4420 if (mddev->curr_resync == 1 || 4421 mddev->curr_resync == 2) 4422 return sprintf(page, "delayed\n"); 4423 4424 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4425 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4426 max_sectors = mddev->resync_max_sectors; 4427 else 4428 max_sectors = mddev->dev_sectors; 4429 4430 resync = mddev->curr_resync_completed; 4431 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4432 } 4433 4434 static struct md_sysfs_entry md_sync_completed = 4435 __ATTR_PREALLOC(sync_completed, 
S_IRUGO, sync_completed_show, NULL); 4436 4437 static ssize_t 4438 min_sync_show(struct mddev *mddev, char *page) 4439 { 4440 return sprintf(page, "%llu\n", 4441 (unsigned long long)mddev->resync_min); 4442 } 4443 static ssize_t 4444 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4445 { 4446 unsigned long long min; 4447 int err; 4448 4449 if (kstrtoull(buf, 10, &min)) 4450 return -EINVAL; 4451 4452 spin_lock(&mddev->lock); 4453 err = -EINVAL; 4454 if (min > mddev->resync_max) 4455 goto out_unlock; 4456 4457 err = -EBUSY; 4458 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4459 goto out_unlock; 4460 4461 /* Round down to multiple of 4K for safety */ 4462 mddev->resync_min = round_down(min, 8); 4463 err = 0; 4464 4465 out_unlock: 4466 spin_unlock(&mddev->lock); 4467 return err ?: len; 4468 } 4469 4470 static struct md_sysfs_entry md_min_sync = 4471 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4472 4473 static ssize_t 4474 max_sync_show(struct mddev *mddev, char *page) 4475 { 4476 if (mddev->resync_max == MaxSector) 4477 return sprintf(page, "max\n"); 4478 else 4479 return sprintf(page, "%llu\n", 4480 (unsigned long long)mddev->resync_max); 4481 } 4482 static ssize_t 4483 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4484 { 4485 int err; 4486 spin_lock(&mddev->lock); 4487 if (strncmp(buf, "max", 3) == 0) 4488 mddev->resync_max = MaxSector; 4489 else { 4490 unsigned long long max; 4491 int chunk; 4492 4493 err = -EINVAL; 4494 if (kstrtoull(buf, 10, &max)) 4495 goto out_unlock; 4496 if (max < mddev->resync_min) 4497 goto out_unlock; 4498 4499 err = -EBUSY; 4500 if (max < mddev->resync_max && 4501 mddev->ro == 0 && 4502 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4503 goto out_unlock; 4504 4505 /* Must be a multiple of chunk_size */ 4506 chunk = mddev->chunk_sectors; 4507 if (chunk) { 4508 sector_t temp = max; 4509 4510 err = -EINVAL; 4511 if (sector_div(temp, chunk)) 4512 goto out_unlock; 4513 } 4514 mddev->resync_max = max; 4515 } 4516 wake_up(&mddev->recovery_wait); 4517 err = 0; 4518 out_unlock: 4519 spin_unlock(&mddev->lock); 4520 return err ?: len; 4521 } 4522 4523 static struct md_sysfs_entry md_max_sync = 4524 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4525 4526 static ssize_t 4527 suspend_lo_show(struct mddev *mddev, char *page) 4528 { 4529 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4530 } 4531 4532 static ssize_t 4533 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4534 { 4535 unsigned long long old, new; 4536 int err; 4537 4538 err = kstrtoull(buf, 10, &new); 4539 if (err < 0) 4540 return err; 4541 if (new != (sector_t)new) 4542 return -EINVAL; 4543 4544 err = mddev_lock(mddev); 4545 if (err) 4546 return err; 4547 err = -EINVAL; 4548 if (mddev->pers == NULL || 4549 mddev->pers->quiesce == NULL) 4550 goto unlock; 4551 old = mddev->suspend_lo; 4552 mddev->suspend_lo = new; 4553 if (new >= old) 4554 /* Shrinking suspended region */ 4555 mddev->pers->quiesce(mddev, 2); 4556 else { 4557 /* Expanding suspended region - need to wait */ 4558 mddev->pers->quiesce(mddev, 1); 4559 mddev->pers->quiesce(mddev, 0); 4560 } 4561 err = 0; 4562 unlock: 4563 mddev_unlock(mddev); 4564 return err ?: len; 4565 } 4566 static struct md_sysfs_entry md_suspend_lo = 4567 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4568 4569 static ssize_t 4570 suspend_hi_show(struct mddev *mddev, char *page) 4571 { 4572 return sprintf(page, "%llu\n", (unsigned 
long long)mddev->suspend_hi); 4573 } 4574 4575 static ssize_t 4576 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 4577 { 4578 unsigned long long old, new; 4579 int err; 4580 4581 err = kstrtoull(buf, 10, &new); 4582 if (err < 0) 4583 return err; 4584 if (new != (sector_t)new) 4585 return -EINVAL; 4586 4587 err = mddev_lock(mddev); 4588 if (err) 4589 return err; 4590 err = -EINVAL; 4591 if (mddev->pers == NULL || 4592 mddev->pers->quiesce == NULL) 4593 goto unlock; 4594 old = mddev->suspend_hi; 4595 mddev->suspend_hi = new; 4596 if (new <= old) 4597 /* Shrinking suspended region */ 4598 mddev->pers->quiesce(mddev, 2); 4599 else { 4600 /* Expanding suspended region - need to wait */ 4601 mddev->pers->quiesce(mddev, 1); 4602 mddev->pers->quiesce(mddev, 0); 4603 } 4604 err = 0; 4605 unlock: 4606 mddev_unlock(mddev); 4607 return err ?: len; 4608 } 4609 static struct md_sysfs_entry md_suspend_hi = 4610 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4611 4612 static ssize_t 4613 reshape_position_show(struct mddev *mddev, char *page) 4614 { 4615 if (mddev->reshape_position != MaxSector) 4616 return sprintf(page, "%llu\n", 4617 (unsigned long long)mddev->reshape_position); 4618 strcpy(page, "none\n"); 4619 return 5; 4620 } 4621 4622 static ssize_t 4623 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 4624 { 4625 struct md_rdev *rdev; 4626 unsigned long long new; 4627 int err; 4628 4629 err = kstrtoull(buf, 10, &new); 4630 if (err < 0) 4631 return err; 4632 if (new != (sector_t)new) 4633 return -EINVAL; 4634 err = mddev_lock(mddev); 4635 if (err) 4636 return err; 4637 err = -EBUSY; 4638 if (mddev->pers) 4639 goto unlock; 4640 mddev->reshape_position = new; 4641 mddev->delta_disks = 0; 4642 mddev->reshape_backwards = 0; 4643 mddev->new_level = mddev->level; 4644 mddev->new_layout = mddev->layout; 4645 mddev->new_chunk_sectors = mddev->chunk_sectors; 4646 rdev_for_each(rdev, mddev) 4647 rdev->new_data_offset = rdev->data_offset; 4648 err = 0; 4649 unlock: 4650 mddev_unlock(mddev); 4651 return err ?: len; 4652 } 4653 4654 static struct md_sysfs_entry md_reshape_position = 4655 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4656 reshape_position_store); 4657 4658 static ssize_t 4659 reshape_direction_show(struct mddev *mddev, char *page) 4660 { 4661 return sprintf(page, "%s\n", 4662 mddev->reshape_backwards ? 
"backwards" : "forwards"); 4663 } 4664 4665 static ssize_t 4666 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 4667 { 4668 int backwards = 0; 4669 int err; 4670 4671 if (cmd_match(buf, "forwards")) 4672 backwards = 0; 4673 else if (cmd_match(buf, "backwards")) 4674 backwards = 1; 4675 else 4676 return -EINVAL; 4677 if (mddev->reshape_backwards == backwards) 4678 return len; 4679 4680 err = mddev_lock(mddev); 4681 if (err) 4682 return err; 4683 /* check if we are allowed to change */ 4684 if (mddev->delta_disks) 4685 err = -EBUSY; 4686 else if (mddev->persistent && 4687 mddev->major_version == 0) 4688 err = -EINVAL; 4689 else 4690 mddev->reshape_backwards = backwards; 4691 mddev_unlock(mddev); 4692 return err ?: len; 4693 } 4694 4695 static struct md_sysfs_entry md_reshape_direction = 4696 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 4697 reshape_direction_store); 4698 4699 static ssize_t 4700 array_size_show(struct mddev *mddev, char *page) 4701 { 4702 if (mddev->external_size) 4703 return sprintf(page, "%llu\n", 4704 (unsigned long long)mddev->array_sectors/2); 4705 else 4706 return sprintf(page, "default\n"); 4707 } 4708 4709 static ssize_t 4710 array_size_store(struct mddev *mddev, const char *buf, size_t len) 4711 { 4712 sector_t sectors; 4713 int err; 4714 4715 err = mddev_lock(mddev); 4716 if (err) 4717 return err; 4718 4719 if (strncmp(buf, "default", 7) == 0) { 4720 if (mddev->pers) 4721 sectors = mddev->pers->size(mddev, 0, 0); 4722 else 4723 sectors = mddev->array_sectors; 4724 4725 mddev->external_size = 0; 4726 } else { 4727 if (strict_blocks_to_sectors(buf, §ors) < 0) 4728 err = -EINVAL; 4729 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4730 err = -E2BIG; 4731 else 4732 mddev->external_size = 1; 4733 } 4734 4735 if (!err) { 4736 mddev->array_sectors = sectors; 4737 if (mddev->pers) { 4738 set_capacity(mddev->gendisk, mddev->array_sectors); 4739 revalidate_disk(mddev->gendisk); 4740 } 4741 } 4742 mddev_unlock(mddev); 4743 return err ?: len; 4744 } 4745 4746 static struct md_sysfs_entry md_array_size = 4747 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4748 array_size_store); 4749 4750 static struct attribute *md_default_attrs[] = { 4751 &md_level.attr, 4752 &md_layout.attr, 4753 &md_raid_disks.attr, 4754 &md_chunk_size.attr, 4755 &md_size.attr, 4756 &md_resync_start.attr, 4757 &md_metadata.attr, 4758 &md_new_device.attr, 4759 &md_safe_delay.attr, 4760 &md_array_state.attr, 4761 &md_reshape_position.attr, 4762 &md_reshape_direction.attr, 4763 &md_array_size.attr, 4764 &max_corr_read_errors.attr, 4765 NULL, 4766 }; 4767 4768 static struct attribute *md_redundancy_attrs[] = { 4769 &md_scan_mode.attr, 4770 &md_last_scan_mode.attr, 4771 &md_mismatches.attr, 4772 &md_sync_min.attr, 4773 &md_sync_max.attr, 4774 &md_sync_speed.attr, 4775 &md_sync_force_parallel.attr, 4776 &md_sync_completed.attr, 4777 &md_min_sync.attr, 4778 &md_max_sync.attr, 4779 &md_suspend_lo.attr, 4780 &md_suspend_hi.attr, 4781 &md_bitmap.attr, 4782 &md_degraded.attr, 4783 NULL, 4784 }; 4785 static struct attribute_group md_redundancy_group = { 4786 .name = NULL, 4787 .attrs = md_redundancy_attrs, 4788 }; 4789 4790 static ssize_t 4791 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4792 { 4793 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4794 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4795 ssize_t rv; 4796 4797 if (!entry->show) 4798 return -EIO; 4799 
spin_lock(&all_mddevs_lock); 4800 if (list_empty(&mddev->all_mddevs)) { 4801 spin_unlock(&all_mddevs_lock); 4802 return -EBUSY; 4803 } 4804 mddev_get(mddev); 4805 spin_unlock(&all_mddevs_lock); 4806 4807 rv = entry->show(mddev, page); 4808 mddev_put(mddev); 4809 return rv; 4810 } 4811 4812 static ssize_t 4813 md_attr_store(struct kobject *kobj, struct attribute *attr, 4814 const char *page, size_t length) 4815 { 4816 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4817 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4818 ssize_t rv; 4819 4820 if (!entry->store) 4821 return -EIO; 4822 if (!capable(CAP_SYS_ADMIN)) 4823 return -EACCES; 4824 spin_lock(&all_mddevs_lock); 4825 if (list_empty(&mddev->all_mddevs)) { 4826 spin_unlock(&all_mddevs_lock); 4827 return -EBUSY; 4828 } 4829 mddev_get(mddev); 4830 spin_unlock(&all_mddevs_lock); 4831 rv = entry->store(mddev, page, length); 4832 mddev_put(mddev); 4833 return rv; 4834 } 4835 4836 static void md_free(struct kobject *ko) 4837 { 4838 struct mddev *mddev = container_of(ko, struct mddev, kobj); 4839 4840 if (mddev->sysfs_state) 4841 sysfs_put(mddev->sysfs_state); 4842 4843 if (mddev->queue) 4844 blk_cleanup_queue(mddev->queue); 4845 if (mddev->gendisk) { 4846 del_gendisk(mddev->gendisk); 4847 put_disk(mddev->gendisk); 4848 } 4849 4850 kfree(mddev); 4851 } 4852 4853 static const struct sysfs_ops md_sysfs_ops = { 4854 .show = md_attr_show, 4855 .store = md_attr_store, 4856 }; 4857 static struct kobj_type md_ktype = { 4858 .release = md_free, 4859 .sysfs_ops = &md_sysfs_ops, 4860 .default_attrs = md_default_attrs, 4861 }; 4862 4863 int mdp_major = 0; 4864 4865 static void mddev_delayed_delete(struct work_struct *ws) 4866 { 4867 struct mddev *mddev = container_of(ws, struct mddev, del_work); 4868 4869 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4870 kobject_del(&mddev->kobj); 4871 kobject_put(&mddev->kobj); 4872 } 4873 4874 static int md_alloc(dev_t dev, char *name) 4875 { 4876 static DEFINE_MUTEX(disks_mutex); 4877 struct mddev *mddev = mddev_find(dev); 4878 struct gendisk *disk; 4879 int partitioned; 4880 int shift; 4881 int unit; 4882 int error; 4883 4884 if (!mddev) 4885 return -ENODEV; 4886 4887 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4888 shift = partitioned ? MdpMinorShift : 0; 4889 unit = MINOR(mddev->unit) >> shift; 4890 4891 /* wait for any previous instance of this device to be 4892 * completely removed (mddev_delayed_delete). 4893 */ 4894 flush_workqueue(md_misc_wq); 4895 4896 mutex_lock(&disks_mutex); 4897 error = -EEXIST; 4898 if (mddev->gendisk) 4899 goto abort; 4900 4901 if (name) { 4902 /* Need to ensure that 'name' is not a duplicate. 
4903 */ 4904 struct mddev *mddev2; 4905 spin_lock(&all_mddevs_lock); 4906 4907 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4908 if (mddev2->gendisk && 4909 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4910 spin_unlock(&all_mddevs_lock); 4911 goto abort; 4912 } 4913 spin_unlock(&all_mddevs_lock); 4914 } 4915 4916 error = -ENOMEM; 4917 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4918 if (!mddev->queue) 4919 goto abort; 4920 mddev->queue->queuedata = mddev; 4921 4922 blk_queue_make_request(mddev->queue, md_make_request); 4923 blk_set_stacking_limits(&mddev->queue->limits); 4924 4925 disk = alloc_disk(1 << shift); 4926 if (!disk) { 4927 blk_cleanup_queue(mddev->queue); 4928 mddev->queue = NULL; 4929 goto abort; 4930 } 4931 disk->major = MAJOR(mddev->unit); 4932 disk->first_minor = unit << shift; 4933 if (name) 4934 strcpy(disk->disk_name, name); 4935 else if (partitioned) 4936 sprintf(disk->disk_name, "md_d%d", unit); 4937 else 4938 sprintf(disk->disk_name, "md%d", unit); 4939 disk->fops = &md_fops; 4940 disk->private_data = mddev; 4941 disk->queue = mddev->queue; 4942 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); 4943 /* Allow extended partitions. This makes the 4944 * 'mdp' device redundant, but we can't really 4945 * remove it now. 4946 */ 4947 disk->flags |= GENHD_FL_EXT_DEVT; 4948 mddev->gendisk = disk; 4949 /* As soon as we call add_disk(), another thread could get 4950 * through to md_open, so make sure it doesn't get too far 4951 */ 4952 mutex_lock(&mddev->open_mutex); 4953 add_disk(disk); 4954 4955 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4956 &disk_to_dev(disk)->kobj, "%s", "md"); 4957 if (error) { 4958 /* This isn't possible, but as kobject_init_and_add is marked 4959 * __must_check, we must do something with the result 4960 */ 4961 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4962 disk->disk_name); 4963 error = 0; 4964 } 4965 if (mddev->kobj.sd && 4966 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4967 printk(KERN_DEBUG "pointless warning\n"); 4968 mutex_unlock(&mddev->open_mutex); 4969 abort: 4970 mutex_unlock(&disks_mutex); 4971 if (!error && mddev->kobj.sd) { 4972 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4973 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4974 } 4975 mddev_put(mddev); 4976 return error; 4977 } 4978 4979 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4980 { 4981 md_alloc(dev, NULL); 4982 return NULL; 4983 } 4984 4985 static int add_named_array(const char *val, struct kernel_param *kp) 4986 { 4987 /* val must be "md_*" where * is not all digits. 4988 * We allocate an array with a large free minor number, and 4989 * set the name to val. val must not already be an active name. 
4990 */ 4991 int len = strlen(val); 4992 char buf[DISK_NAME_LEN]; 4993 4994 while (len && val[len-1] == '\n') 4995 len--; 4996 if (len >= DISK_NAME_LEN) 4997 return -E2BIG; 4998 strlcpy(buf, val, len+1); 4999 if (strncmp(buf, "md_", 3) != 0) 5000 return -EINVAL; 5001 return md_alloc(0, buf); 5002 } 5003 5004 static void md_safemode_timeout(unsigned long data) 5005 { 5006 struct mddev *mddev = (struct mddev *) data; 5007 5008 if (!atomic_read(&mddev->writes_pending)) { 5009 mddev->safemode = 1; 5010 if (mddev->external) 5011 sysfs_notify_dirent_safe(mddev->sysfs_state); 5012 } 5013 md_wakeup_thread(mddev->thread); 5014 } 5015 5016 static int start_dirty_degraded; 5017 5018 int md_run(struct mddev *mddev) 5019 { 5020 int err; 5021 struct md_rdev *rdev; 5022 struct md_personality *pers; 5023 5024 if (list_empty(&mddev->disks)) 5025 /* cannot run an array with no devices.. */ 5026 return -EINVAL; 5027 5028 if (mddev->pers) 5029 return -EBUSY; 5030 /* Cannot run until previous stop completes properly */ 5031 if (mddev->sysfs_active) 5032 return -EBUSY; 5033 5034 /* 5035 * Analyze all RAID superblock(s) 5036 */ 5037 if (!mddev->raid_disks) { 5038 if (!mddev->persistent) 5039 return -EINVAL; 5040 analyze_sbs(mddev); 5041 } 5042 5043 if (mddev->level != LEVEL_NONE) 5044 request_module("md-level-%d", mddev->level); 5045 else if (mddev->clevel[0]) 5046 request_module("md-%s", mddev->clevel); 5047 5048 /* 5049 * Drop all container device buffers, from now on 5050 * the only valid external interface is through the md 5051 * device. 5052 */ 5053 rdev_for_each(rdev, mddev) { 5054 if (test_bit(Faulty, &rdev->flags)) 5055 continue; 5056 sync_blockdev(rdev->bdev); 5057 invalidate_bdev(rdev->bdev); 5058 5059 /* perform some consistency tests on the device. 5060 * We don't want the data to overlap the metadata, 5061 * Internal Bitmap issues have been handled elsewhere. 5062 */ 5063 if (rdev->meta_bdev) { 5064 /* Nothing to check */; 5065 } else if (rdev->data_offset < rdev->sb_start) { 5066 if (mddev->dev_sectors && 5067 rdev->data_offset + mddev->dev_sectors 5068 > rdev->sb_start) { 5069 printk("md: %s: data overlaps metadata\n", 5070 mdname(mddev)); 5071 return -EINVAL; 5072 } 5073 } else { 5074 if (rdev->sb_start + rdev->sb_size/512 5075 > rdev->data_offset) { 5076 printk("md: %s: metadata overlaps data\n", 5077 mdname(mddev)); 5078 return -EINVAL; 5079 } 5080 } 5081 sysfs_notify_dirent_safe(rdev->sysfs_state); 5082 } 5083 5084 if (mddev->bio_set == NULL) 5085 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0); 5086 5087 spin_lock(&pers_lock); 5088 pers = find_pers(mddev->level, mddev->clevel); 5089 if (!pers || !try_module_get(pers->owner)) { 5090 spin_unlock(&pers_lock); 5091 if (mddev->level != LEVEL_NONE) 5092 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 5093 mddev->level); 5094 else 5095 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 5096 mddev->clevel); 5097 return -EINVAL; 5098 } 5099 spin_unlock(&pers_lock); 5100 if (mddev->level != pers->level) { 5101 mddev->level = pers->level; 5102 mddev->new_level = pers->level; 5103 } 5104 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5105 5106 if (mddev->reshape_position != MaxSector && 5107 pers->start_reshape == NULL) { 5108 /* This personality cannot handle reshaping... */ 5109 module_put(pers->owner); 5110 return -EINVAL; 5111 } 5112 5113 if (pers->sync_request) { 5114 /* Warn if this is a potentially silly 5115 * configuration. 
5116 */ 5117 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5118 struct md_rdev *rdev2; 5119 int warned = 0; 5120 5121 rdev_for_each(rdev, mddev) 5122 rdev_for_each(rdev2, mddev) { 5123 if (rdev < rdev2 && 5124 rdev->bdev->bd_contains == 5125 rdev2->bdev->bd_contains) { 5126 printk(KERN_WARNING 5127 "%s: WARNING: %s appears to be" 5128 " on the same physical disk as" 5129 " %s.\n", 5130 mdname(mddev), 5131 bdevname(rdev->bdev,b), 5132 bdevname(rdev2->bdev,b2)); 5133 warned = 1; 5134 } 5135 } 5136 5137 if (warned) 5138 printk(KERN_WARNING 5139 "True protection against single-disk" 5140 " failure might be compromised.\n"); 5141 } 5142 5143 mddev->recovery = 0; 5144 /* may be over-ridden by personality */ 5145 mddev->resync_max_sectors = mddev->dev_sectors; 5146 5147 mddev->ok_start_degraded = start_dirty_degraded; 5148 5149 if (start_readonly && mddev->ro == 0) 5150 mddev->ro = 2; /* read-only, but switch on first write */ 5151 5152 err = pers->run(mddev); 5153 if (err) 5154 printk(KERN_ERR "md: pers->run() failed ...\n"); 5155 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5156 WARN_ONCE(!mddev->external_size, "%s: default size too small," 5157 " but 'external_size' not in effect?\n", __func__); 5158 printk(KERN_ERR 5159 "md: invalid array_size %llu > default size %llu\n", 5160 (unsigned long long)mddev->array_sectors / 2, 5161 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5162 err = -EINVAL; 5163 } 5164 if (err == 0 && pers->sync_request && 5165 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5166 struct bitmap *bitmap; 5167 5168 bitmap = bitmap_create(mddev, -1); 5169 if (IS_ERR(bitmap)) { 5170 err = PTR_ERR(bitmap); 5171 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 5172 mdname(mddev), err); 5173 } else 5174 mddev->bitmap = bitmap; 5175 5176 } 5177 if (err) { 5178 mddev_detach(mddev); 5179 if (mddev->private) 5180 pers->free(mddev, mddev->private); 5181 mddev->private = NULL; 5182 module_put(pers->owner); 5183 bitmap_destroy(mddev); 5184 return err; 5185 } 5186 if (mddev->queue) { 5187 mddev->queue->backing_dev_info.congested_data = mddev; 5188 mddev->queue->backing_dev_info.congested_fn = md_congested; 5189 blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec); 5190 } 5191 if (pers->sync_request) { 5192 if (mddev->kobj.sd && 5193 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 5194 printk(KERN_WARNING 5195 "md: cannot register extra attributes for %s\n", 5196 mdname(mddev)); 5197 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 5198 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 5199 mddev->ro = 0; 5200 5201 atomic_set(&mddev->writes_pending,0); 5202 atomic_set(&mddev->max_corr_read_errors, 5203 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 5204 mddev->safemode = 0; 5205 mddev->safemode_timer.function = md_safemode_timeout; 5206 mddev->safemode_timer.data = (unsigned long) mddev; 5207 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 5208 mddev->in_sync = 1; 5209 smp_wmb(); 5210 spin_lock(&mddev->lock); 5211 mddev->pers = pers; 5212 mddev->ready = 1; 5213 spin_unlock(&mddev->lock); 5214 rdev_for_each(rdev, mddev) 5215 if (rdev->raid_disk >= 0) 5216 if (sysfs_link_rdev(mddev, rdev)) 5217 /* failure here is OK */; 5218 5219 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5220 5221 if (mddev->flags & MD_UPDATE_SB_FLAGS) 5222 md_update_sb(mddev, 0); 5223 5224 md_new_event(mddev); 5225 sysfs_notify_dirent_safe(mddev->sysfs_state); 5226 sysfs_notify_dirent_safe(mddev->sysfs_action); 5227 
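/*
 * Also notify pollers of the 'degraded' attribute so userspace
 * monitors pick up the array's initial degraded count now that the
 * array is running.
 */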
sysfs_notify(&mddev->kobj, NULL, "degraded"); 5228 return 0; 5229 } 5230 EXPORT_SYMBOL_GPL(md_run); 5231 5232 static int do_md_run(struct mddev *mddev) 5233 { 5234 int err; 5235 5236 err = md_run(mddev); 5237 if (err) 5238 goto out; 5239 err = bitmap_load(mddev); 5240 if (err) { 5241 bitmap_destroy(mddev); 5242 goto out; 5243 } 5244 5245 md_wakeup_thread(mddev->thread); 5246 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 5247 5248 set_capacity(mddev->gendisk, mddev->array_sectors); 5249 revalidate_disk(mddev->gendisk); 5250 mddev->changed = 1; 5251 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5252 out: 5253 return err; 5254 } 5255 5256 static int restart_array(struct mddev *mddev) 5257 { 5258 struct gendisk *disk = mddev->gendisk; 5259 5260 /* Complain if it has no devices */ 5261 if (list_empty(&mddev->disks)) 5262 return -ENXIO; 5263 if (!mddev->pers) 5264 return -EINVAL; 5265 if (!mddev->ro) 5266 return -EBUSY; 5267 mddev->safemode = 0; 5268 mddev->ro = 0; 5269 set_disk_ro(disk, 0); 5270 printk(KERN_INFO "md: %s switched to read-write mode.\n", 5271 mdname(mddev)); 5272 /* Kick recovery or resync if necessary */ 5273 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5274 md_wakeup_thread(mddev->thread); 5275 md_wakeup_thread(mddev->sync_thread); 5276 sysfs_notify_dirent_safe(mddev->sysfs_state); 5277 return 0; 5278 } 5279 5280 static void md_clean(struct mddev *mddev) 5281 { 5282 mddev->array_sectors = 0; 5283 mddev->external_size = 0; 5284 mddev->dev_sectors = 0; 5285 mddev->raid_disks = 0; 5286 mddev->recovery_cp = 0; 5287 mddev->resync_min = 0; 5288 mddev->resync_max = MaxSector; 5289 mddev->reshape_position = MaxSector; 5290 mddev->external = 0; 5291 mddev->persistent = 0; 5292 mddev->level = LEVEL_NONE; 5293 mddev->clevel[0] = 0; 5294 mddev->flags = 0; 5295 mddev->ro = 0; 5296 mddev->metadata_type[0] = 0; 5297 mddev->chunk_sectors = 0; 5298 mddev->ctime = mddev->utime = 0; 5299 mddev->layout = 0; 5300 mddev->max_disks = 0; 5301 mddev->events = 0; 5302 mddev->can_decrease_events = 0; 5303 mddev->delta_disks = 0; 5304 mddev->reshape_backwards = 0; 5305 mddev->new_level = LEVEL_NONE; 5306 mddev->new_layout = 0; 5307 mddev->new_chunk_sectors = 0; 5308 mddev->curr_resync = 0; 5309 atomic64_set(&mddev->resync_mismatches, 0); 5310 mddev->suspend_lo = mddev->suspend_hi = 0; 5311 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5312 mddev->recovery = 0; 5313 mddev->in_sync = 0; 5314 mddev->changed = 0; 5315 mddev->degraded = 0; 5316 mddev->safemode = 0; 5317 mddev->private = NULL; 5318 mddev->merge_check_needed = 0; 5319 mddev->bitmap_info.offset = 0; 5320 mddev->bitmap_info.default_offset = 0; 5321 mddev->bitmap_info.default_space = 0; 5322 mddev->bitmap_info.chunksize = 0; 5323 mddev->bitmap_info.daemon_sleep = 0; 5324 mddev->bitmap_info.max_write_behind = 0; 5325 } 5326 5327 static void __md_stop_writes(struct mddev *mddev) 5328 { 5329 if (mddev_is_clustered(mddev)) 5330 md_cluster_ops->metadata_update_start(mddev); 5331 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5332 flush_workqueue(md_misc_wq); 5333 if (mddev->sync_thread) { 5334 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5335 md_reap_sync_thread(mddev); 5336 } 5337 5338 del_timer_sync(&mddev->safemode_timer); 5339 5340 bitmap_flush(mddev); 5341 md_super_wait(mddev); 5342 5343 if (mddev->ro == 0 && 5344 (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) { 5345 /* mark array as shutdown cleanly */ 5346 mddev->in_sync = 1; 5347 md_update_sb(mddev, 1); 5348 } 5349 if 
(mddev_is_clustered(mddev)) 5350 md_cluster_ops->metadata_update_finish(mddev); 5351 } 5352 5353 void md_stop_writes(struct mddev *mddev) 5354 { 5355 mddev_lock_nointr(mddev); 5356 __md_stop_writes(mddev); 5357 mddev_unlock(mddev); 5358 } 5359 EXPORT_SYMBOL_GPL(md_stop_writes); 5360 5361 static void mddev_detach(struct mddev *mddev) 5362 { 5363 struct bitmap *bitmap = mddev->bitmap; 5364 /* wait for behind writes to complete */ 5365 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { 5366 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n", 5367 mdname(mddev)); 5368 /* need to kick something here to make sure I/O goes? */ 5369 wait_event(bitmap->behind_wait, 5370 atomic_read(&bitmap->behind_writes) == 0); 5371 } 5372 if (mddev->pers && mddev->pers->quiesce) { 5373 mddev->pers->quiesce(mddev, 1); 5374 mddev->pers->quiesce(mddev, 0); 5375 } 5376 md_unregister_thread(&mddev->thread); 5377 if (mddev->queue) 5378 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5379 } 5380 5381 static void __md_stop(struct mddev *mddev) 5382 { 5383 struct md_personality *pers = mddev->pers; 5384 mddev_detach(mddev); 5385 spin_lock(&mddev->lock); 5386 mddev->ready = 0; 5387 mddev->pers = NULL; 5388 spin_unlock(&mddev->lock); 5389 pers->free(mddev, mddev->private); 5390 mddev->private = NULL; 5391 if (pers->sync_request && mddev->to_remove == NULL) 5392 mddev->to_remove = &md_redundancy_group; 5393 module_put(pers->owner); 5394 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5395 } 5396 5397 void md_stop(struct mddev *mddev) 5398 { 5399 /* stop the array and free an attached data structures. 5400 * This is called from dm-raid 5401 */ 5402 __md_stop(mddev); 5403 bitmap_destroy(mddev); 5404 if (mddev->bio_set) 5405 bioset_free(mddev->bio_set); 5406 } 5407 5408 EXPORT_SYMBOL_GPL(md_stop); 5409 5410 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 5411 { 5412 int err = 0; 5413 int did_freeze = 0; 5414 5415 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5416 did_freeze = 1; 5417 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5418 md_wakeup_thread(mddev->thread); 5419 } 5420 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5421 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5422 if (mddev->sync_thread) 5423 /* Thread might be blocked waiting for metadata update 5424 * which will now never happen */ 5425 wake_up_process(mddev->sync_thread->tsk); 5426 5427 mddev_unlock(mddev); 5428 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 5429 &mddev->recovery)); 5430 mddev_lock_nointr(mddev); 5431 5432 mutex_lock(&mddev->open_mutex); 5433 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5434 mddev->sync_thread || 5435 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 5436 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5437 printk("md: %s still in use.\n",mdname(mddev)); 5438 if (did_freeze) { 5439 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5440 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5441 md_wakeup_thread(mddev->thread); 5442 } 5443 err = -EBUSY; 5444 goto out; 5445 } 5446 if (mddev->pers) { 5447 __md_stop_writes(mddev); 5448 5449 err = -ENXIO; 5450 if (mddev->ro==1) 5451 goto out; 5452 mddev->ro = 1; 5453 set_disk_ro(mddev->gendisk, 1); 5454 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5455 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5456 md_wakeup_thread(mddev->thread); 5457 sysfs_notify_dirent_safe(mddev->sysfs_state); 5458 err = 0; 5459 } 5460 out: 5461 mutex_unlock(&mddev->open_mutex); 5462 return 
err; 5463 } 5464 5465 /* mode: 5466 * 0 - completely stop and dis-assemble array 5467 * 2 - stop but do not disassemble array 5468 */ 5469 static int do_md_stop(struct mddev *mddev, int mode, 5470 struct block_device *bdev) 5471 { 5472 struct gendisk *disk = mddev->gendisk; 5473 struct md_rdev *rdev; 5474 int did_freeze = 0; 5475 5476 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5477 did_freeze = 1; 5478 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5479 md_wakeup_thread(mddev->thread); 5480 } 5481 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5482 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5483 if (mddev->sync_thread) 5484 /* Thread might be blocked waiting for metadata update 5485 * which will now never happen */ 5486 wake_up_process(mddev->sync_thread->tsk); 5487 5488 mddev_unlock(mddev); 5489 wait_event(resync_wait, (mddev->sync_thread == NULL && 5490 !test_bit(MD_RECOVERY_RUNNING, 5491 &mddev->recovery))); 5492 mddev_lock_nointr(mddev); 5493 5494 mutex_lock(&mddev->open_mutex); 5495 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5496 mddev->sysfs_active || 5497 mddev->sync_thread || 5498 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 5499 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5500 printk("md: %s still in use.\n",mdname(mddev)); 5501 mutex_unlock(&mddev->open_mutex); 5502 if (did_freeze) { 5503 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5504 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5505 md_wakeup_thread(mddev->thread); 5506 } 5507 return -EBUSY; 5508 } 5509 if (mddev->pers) { 5510 if (mddev->ro) 5511 set_disk_ro(disk, 0); 5512 5513 __md_stop_writes(mddev); 5514 __md_stop(mddev); 5515 mddev->queue->merge_bvec_fn = NULL; 5516 mddev->queue->backing_dev_info.congested_fn = NULL; 5517 5518 /* tell userspace to handle 'inactive' */ 5519 sysfs_notify_dirent_safe(mddev->sysfs_state); 5520 5521 rdev_for_each(rdev, mddev) 5522 if (rdev->raid_disk >= 0) 5523 sysfs_unlink_rdev(mddev, rdev); 5524 5525 set_capacity(disk, 0); 5526 mutex_unlock(&mddev->open_mutex); 5527 mddev->changed = 1; 5528 revalidate_disk(disk); 5529 5530 if (mddev->ro) 5531 mddev->ro = 0; 5532 } else 5533 mutex_unlock(&mddev->open_mutex); 5534 /* 5535 * Free resources if final stop 5536 */ 5537 if (mode == 0) { 5538 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 5539 5540 bitmap_destroy(mddev); 5541 if (mddev->bitmap_info.file) { 5542 struct file *f = mddev->bitmap_info.file; 5543 spin_lock(&mddev->lock); 5544 mddev->bitmap_info.file = NULL; 5545 spin_unlock(&mddev->lock); 5546 fput(f); 5547 } 5548 mddev->bitmap_info.offset = 0; 5549 5550 export_array(mddev); 5551 5552 md_clean(mddev); 5553 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5554 if (mddev->hold_active == UNTIL_STOP) 5555 mddev->hold_active = 0; 5556 } 5557 blk_integrity_unregister(disk); 5558 md_new_event(mddev); 5559 sysfs_notify_dirent_safe(mddev->sysfs_state); 5560 return 0; 5561 } 5562 5563 #ifndef MODULE 5564 static void autorun_array(struct mddev *mddev) 5565 { 5566 struct md_rdev *rdev; 5567 int err; 5568 5569 if (list_empty(&mddev->disks)) 5570 return; 5571 5572 printk(KERN_INFO "md: running: "); 5573 5574 rdev_for_each(rdev, mddev) { 5575 char b[BDEVNAME_SIZE]; 5576 printk("<%s>", bdevname(rdev->bdev,b)); 5577 } 5578 printk("\n"); 5579 5580 err = do_md_run(mddev); 5581 if (err) { 5582 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 5583 do_md_stop(mddev, 0, NULL); 5584 } 5585 } 5586 5587 /* 5588 * lets try to run arrays based on all disks that have 
arrived 5589 * until now. (those are in pending_raid_disks) 5590 * 5591 * the method: pick the first pending disk, collect all disks with 5592 * the same UUID, remove all from the pending list and put them into 5593 * the 'same_array' list. Then order this list based on superblock 5594 * update time (freshest comes first), kick out 'old' disks and 5595 * compare superblocks. If everything's fine then run it. 5596 * 5597 * If "unit" is allocated, then bump its reference count 5598 */ 5599 static void autorun_devices(int part) 5600 { 5601 struct md_rdev *rdev0, *rdev, *tmp; 5602 struct mddev *mddev; 5603 char b[BDEVNAME_SIZE]; 5604 5605 printk(KERN_INFO "md: autorun ...\n"); 5606 while (!list_empty(&pending_raid_disks)) { 5607 int unit; 5608 dev_t dev; 5609 LIST_HEAD(candidates); 5610 rdev0 = list_entry(pending_raid_disks.next, 5611 struct md_rdev, same_set); 5612 5613 printk(KERN_INFO "md: considering %s ...\n", 5614 bdevname(rdev0->bdev,b)); 5615 INIT_LIST_HEAD(&candidates); 5616 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 5617 if (super_90_load(rdev, rdev0, 0) >= 0) { 5618 printk(KERN_INFO "md: adding %s ...\n", 5619 bdevname(rdev->bdev,b)); 5620 list_move(&rdev->same_set, &candidates); 5621 } 5622 /* 5623 * now we have a set of devices, with all of them having 5624 * mostly sane superblocks. It's time to allocate the 5625 * mddev. 5626 */ 5627 if (part) { 5628 dev = MKDEV(mdp_major, 5629 rdev0->preferred_minor << MdpMinorShift); 5630 unit = MINOR(dev) >> MdpMinorShift; 5631 } else { 5632 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 5633 unit = MINOR(dev); 5634 } 5635 if (rdev0->preferred_minor != unit) { 5636 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 5637 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 5638 break; 5639 } 5640 5641 md_probe(dev, NULL, NULL); 5642 mddev = mddev_find(dev); 5643 if (!mddev || !mddev->gendisk) { 5644 if (mddev) 5645 mddev_put(mddev); 5646 printk(KERN_ERR 5647 "md: cannot allocate memory for md drive.\n"); 5648 break; 5649 } 5650 if (mddev_lock(mddev)) 5651 printk(KERN_WARNING "md: %s locked, cannot run\n", 5652 mdname(mddev)); 5653 else if (mddev->raid_disks || mddev->major_version 5654 || !list_empty(&mddev->disks)) { 5655 printk(KERN_WARNING 5656 "md: %s already running, cannot run %s\n", 5657 mdname(mddev), bdevname(rdev0->bdev,b)); 5658 mddev_unlock(mddev); 5659 } else { 5660 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 5661 mddev->persistent = 1; 5662 rdev_for_each_list(rdev, tmp, &candidates) { 5663 list_del_init(&rdev->same_set); 5664 if (bind_rdev_to_array(rdev, mddev)) 5665 export_rdev(rdev); 5666 } 5667 autorun_array(mddev); 5668 mddev_unlock(mddev); 5669 } 5670 /* on success, candidates will be empty, on error 5671 * it won't... 5672 */ 5673 rdev_for_each_list(rdev, tmp, &candidates) { 5674 list_del_init(&rdev->same_set); 5675 export_rdev(rdev); 5676 } 5677 mddev_put(mddev); 5678 } 5679 printk(KERN_INFO "md: ... 
autorun DONE.\n"); 5680 } 5681 #endif /* !MODULE */ 5682 5683 static int get_version(void __user *arg) 5684 { 5685 mdu_version_t ver; 5686 5687 ver.major = MD_MAJOR_VERSION; 5688 ver.minor = MD_MINOR_VERSION; 5689 ver.patchlevel = MD_PATCHLEVEL_VERSION; 5690 5691 if (copy_to_user(arg, &ver, sizeof(ver))) 5692 return -EFAULT; 5693 5694 return 0; 5695 } 5696 5697 static int get_array_info(struct mddev *mddev, void __user *arg) 5698 { 5699 mdu_array_info_t info; 5700 int nr,working,insync,failed,spare; 5701 struct md_rdev *rdev; 5702 5703 nr = working = insync = failed = spare = 0; 5704 rcu_read_lock(); 5705 rdev_for_each_rcu(rdev, mddev) { 5706 nr++; 5707 if (test_bit(Faulty, &rdev->flags)) 5708 failed++; 5709 else { 5710 working++; 5711 if (test_bit(In_sync, &rdev->flags)) 5712 insync++; 5713 else 5714 spare++; 5715 } 5716 } 5717 rcu_read_unlock(); 5718 5719 info.major_version = mddev->major_version; 5720 info.minor_version = mddev->minor_version; 5721 info.patch_version = MD_PATCHLEVEL_VERSION; 5722 info.ctime = mddev->ctime; 5723 info.level = mddev->level; 5724 info.size = mddev->dev_sectors / 2; 5725 if (info.size != mddev->dev_sectors / 2) /* overflow */ 5726 info.size = -1; 5727 info.nr_disks = nr; 5728 info.raid_disks = mddev->raid_disks; 5729 info.md_minor = mddev->md_minor; 5730 info.not_persistent= !mddev->persistent; 5731 5732 info.utime = mddev->utime; 5733 info.state = 0; 5734 if (mddev->in_sync) 5735 info.state = (1<<MD_SB_CLEAN); 5736 if (mddev->bitmap && mddev->bitmap_info.offset) 5737 info.state |= (1<<MD_SB_BITMAP_PRESENT); 5738 if (mddev_is_clustered(mddev)) 5739 info.state |= (1<<MD_SB_CLUSTERED); 5740 info.active_disks = insync; 5741 info.working_disks = working; 5742 info.failed_disks = failed; 5743 info.spare_disks = spare; 5744 5745 info.layout = mddev->layout; 5746 info.chunk_size = mddev->chunk_sectors << 9; 5747 5748 if (copy_to_user(arg, &info, sizeof(info))) 5749 return -EFAULT; 5750 5751 return 0; 5752 } 5753 5754 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 5755 { 5756 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5757 char *ptr; 5758 int err; 5759 5760 file = kmalloc(sizeof(*file), GFP_NOIO); 5761 if (!file) 5762 return -ENOMEM; 5763 5764 err = 0; 5765 spin_lock(&mddev->lock); 5766 /* bitmap disabled, zero the first byte and copy out */ 5767 if (!mddev->bitmap_info.file) 5768 file->pathname[0] = '\0'; 5769 else if ((ptr = file_path(mddev->bitmap_info.file, 5770 file->pathname, sizeof(file->pathname))), 5771 IS_ERR(ptr)) 5772 err = PTR_ERR(ptr); 5773 else 5774 memmove(file->pathname, ptr, 5775 sizeof(file->pathname)-(ptr-file->pathname)); 5776 spin_unlock(&mddev->lock); 5777 5778 if (err == 0 && 5779 copy_to_user(arg, file, sizeof(*file))) 5780 err = -EFAULT; 5781 5782 kfree(file); 5783 return err; 5784 } 5785 5786 static int get_disk_info(struct mddev *mddev, void __user * arg) 5787 { 5788 mdu_disk_info_t info; 5789 struct md_rdev *rdev; 5790 5791 if (copy_from_user(&info, arg, sizeof(info))) 5792 return -EFAULT; 5793 5794 rcu_read_lock(); 5795 rdev = md_find_rdev_nr_rcu(mddev, info.number); 5796 if (rdev) { 5797 info.major = MAJOR(rdev->bdev->bd_dev); 5798 info.minor = MINOR(rdev->bdev->bd_dev); 5799 info.raid_disk = rdev->raid_disk; 5800 info.state = 0; 5801 if (test_bit(Faulty, &rdev->flags)) 5802 info.state |= (1<<MD_DISK_FAULTY); 5803 else if (test_bit(In_sync, &rdev->flags)) { 5804 info.state |= (1<<MD_DISK_ACTIVE); 5805 info.state |= (1<<MD_DISK_SYNC); 5806 } 5807 if (test_bit(WriteMostly, &rdev->flags)) 5808 
info.state |= (1<<MD_DISK_WRITEMOSTLY); 5809 } else { 5810 info.major = info.minor = 0; 5811 info.raid_disk = -1; 5812 info.state = (1<<MD_DISK_REMOVED); 5813 } 5814 rcu_read_unlock(); 5815 5816 if (copy_to_user(arg, &info, sizeof(info))) 5817 return -EFAULT; 5818 5819 return 0; 5820 } 5821 5822 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) 5823 { 5824 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5825 struct md_rdev *rdev; 5826 dev_t dev = MKDEV(info->major,info->minor); 5827 5828 if (mddev_is_clustered(mddev) && 5829 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 5830 pr_err("%s: Cannot add to clustered mddev.\n", 5831 mdname(mddev)); 5832 return -EINVAL; 5833 } 5834 5835 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5836 return -EOVERFLOW; 5837 5838 if (!mddev->raid_disks) { 5839 int err; 5840 /* expecting a device which has a superblock */ 5841 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5842 if (IS_ERR(rdev)) { 5843 printk(KERN_WARNING 5844 "md: md_import_device returned %ld\n", 5845 PTR_ERR(rdev)); 5846 return PTR_ERR(rdev); 5847 } 5848 if (!list_empty(&mddev->disks)) { 5849 struct md_rdev *rdev0 5850 = list_entry(mddev->disks.next, 5851 struct md_rdev, same_set); 5852 err = super_types[mddev->major_version] 5853 .load_super(rdev, rdev0, mddev->minor_version); 5854 if (err < 0) { 5855 printk(KERN_WARNING 5856 "md: %s has different UUID to %s\n", 5857 bdevname(rdev->bdev,b), 5858 bdevname(rdev0->bdev,b2)); 5859 export_rdev(rdev); 5860 return -EINVAL; 5861 } 5862 } 5863 err = bind_rdev_to_array(rdev, mddev); 5864 if (err) 5865 export_rdev(rdev); 5866 return err; 5867 } 5868 5869 /* 5870 * add_new_disk can be used once the array is assembled 5871 * to add "hot spares". They must already have a superblock 5872 * written 5873 */ 5874 if (mddev->pers) { 5875 int err; 5876 if (!mddev->pers->hot_add_disk) { 5877 printk(KERN_WARNING 5878 "%s: personality does not support diskops!\n", 5879 mdname(mddev)); 5880 return -EINVAL; 5881 } 5882 if (mddev->persistent) 5883 rdev = md_import_device(dev, mddev->major_version, 5884 mddev->minor_version); 5885 else 5886 rdev = md_import_device(dev, -1, -1); 5887 if (IS_ERR(rdev)) { 5888 printk(KERN_WARNING 5889 "md: md_import_device returned %ld\n", 5890 PTR_ERR(rdev)); 5891 return PTR_ERR(rdev); 5892 } 5893 /* set saved_raid_disk if appropriate */ 5894 if (!mddev->persistent) { 5895 if (info->state & (1<<MD_DISK_SYNC) && 5896 info->raid_disk < mddev->raid_disks) { 5897 rdev->raid_disk = info->raid_disk; 5898 set_bit(In_sync, &rdev->flags); 5899 clear_bit(Bitmap_sync, &rdev->flags); 5900 } else 5901 rdev->raid_disk = -1; 5902 rdev->saved_raid_disk = rdev->raid_disk; 5903 } else 5904 super_types[mddev->major_version]. 5905 validate_super(mddev, rdev); 5906 if ((info->state & (1<<MD_DISK_SYNC)) && 5907 rdev->raid_disk != info->raid_disk) { 5908 /* This was a hot-add request, but events doesn't 5909 * match, so reject it. 
5910 */ 5911 export_rdev(rdev); 5912 return -EINVAL; 5913 } 5914 5915 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5916 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5917 set_bit(WriteMostly, &rdev->flags); 5918 else 5919 clear_bit(WriteMostly, &rdev->flags); 5920 5921 /* 5922 * check whether the device shows up in other nodes 5923 */ 5924 if (mddev_is_clustered(mddev)) { 5925 if (info->state & (1 << MD_DISK_CANDIDATE)) { 5926 /* Through --cluster-confirm */ 5927 set_bit(Candidate, &rdev->flags); 5928 err = md_cluster_ops->new_disk_ack(mddev, true); 5929 if (err) { 5930 export_rdev(rdev); 5931 return err; 5932 } 5933 } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 5934 /* --add initiated by this node */ 5935 err = md_cluster_ops->add_new_disk_start(mddev, rdev); 5936 if (err) { 5937 md_cluster_ops->add_new_disk_finish(mddev); 5938 export_rdev(rdev); 5939 return err; 5940 } 5941 } 5942 } 5943 5944 rdev->raid_disk = -1; 5945 err = bind_rdev_to_array(rdev, mddev); 5946 if (err) 5947 export_rdev(rdev); 5948 else 5949 err = add_bound_rdev(rdev); 5950 if (mddev_is_clustered(mddev) && 5951 (info->state & (1 << MD_DISK_CLUSTER_ADD))) 5952 md_cluster_ops->add_new_disk_finish(mddev); 5953 return err; 5954 } 5955 5956 /* otherwise, add_new_disk is only allowed 5957 * for major_version==0 superblocks 5958 */ 5959 if (mddev->major_version != 0) { 5960 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5961 mdname(mddev)); 5962 return -EINVAL; 5963 } 5964 5965 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5966 int err; 5967 rdev = md_import_device(dev, -1, 0); 5968 if (IS_ERR(rdev)) { 5969 printk(KERN_WARNING 5970 "md: error, md_import_device() returned %ld\n", 5971 PTR_ERR(rdev)); 5972 return PTR_ERR(rdev); 5973 } 5974 rdev->desc_nr = info->number; 5975 if (info->raid_disk < mddev->raid_disks) 5976 rdev->raid_disk = info->raid_disk; 5977 else 5978 rdev->raid_disk = -1; 5979 5980 if (rdev->raid_disk < mddev->raid_disks) 5981 if (info->state & (1<<MD_DISK_SYNC)) 5982 set_bit(In_sync, &rdev->flags); 5983 5984 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5985 set_bit(WriteMostly, &rdev->flags); 5986 5987 if (!mddev->persistent) { 5988 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5989 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5990 } else 5991 rdev->sb_start = calc_dev_sboffset(rdev); 5992 rdev->sectors = rdev->sb_start; 5993 5994 err = bind_rdev_to_array(rdev, mddev); 5995 if (err) { 5996 export_rdev(rdev); 5997 return err; 5998 } 5999 } 6000 6001 return 0; 6002 } 6003 6004 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6005 { 6006 char b[BDEVNAME_SIZE]; 6007 struct md_rdev *rdev; 6008 6009 rdev = find_rdev(mddev, dev); 6010 if (!rdev) 6011 return -ENXIO; 6012 6013 if (mddev_is_clustered(mddev)) 6014 md_cluster_ops->metadata_update_start(mddev); 6015 6016 clear_bit(Blocked, &rdev->flags); 6017 remove_and_add_spares(mddev, rdev); 6018 6019 if (rdev->raid_disk >= 0) 6020 goto busy; 6021 6022 if (mddev_is_clustered(mddev)) 6023 md_cluster_ops->remove_disk(mddev, rdev); 6024 6025 md_kick_rdev_from_array(rdev); 6026 md_update_sb(mddev, 1); 6027 md_new_event(mddev); 6028 6029 if (mddev_is_clustered(mddev)) 6030 md_cluster_ops->metadata_update_finish(mddev); 6031 6032 return 0; 6033 busy: 6034 if (mddev_is_clustered(mddev)) 6035 md_cluster_ops->metadata_update_cancel(mddev); 6036 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 6037 bdevname(rdev->bdev,b), mdname(mddev)); 6038 return -EBUSY; 6039 } 6040 6041 static int 
hot_add_disk(struct mddev *mddev, dev_t dev) 6042 { 6043 char b[BDEVNAME_SIZE]; 6044 int err; 6045 struct md_rdev *rdev; 6046 6047 if (!mddev->pers) 6048 return -ENODEV; 6049 6050 if (mddev->major_version != 0) { 6051 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 6052 " version-0 superblocks.\n", 6053 mdname(mddev)); 6054 return -EINVAL; 6055 } 6056 if (!mddev->pers->hot_add_disk) { 6057 printk(KERN_WARNING 6058 "%s: personality does not support diskops!\n", 6059 mdname(mddev)); 6060 return -EINVAL; 6061 } 6062 6063 rdev = md_import_device(dev, -1, 0); 6064 if (IS_ERR(rdev)) { 6065 printk(KERN_WARNING 6066 "md: error, md_import_device() returned %ld\n", 6067 PTR_ERR(rdev)); 6068 return -EINVAL; 6069 } 6070 6071 if (mddev->persistent) 6072 rdev->sb_start = calc_dev_sboffset(rdev); 6073 else 6074 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6075 6076 rdev->sectors = rdev->sb_start; 6077 6078 if (test_bit(Faulty, &rdev->flags)) { 6079 printk(KERN_WARNING 6080 "md: can not hot-add faulty %s disk to %s!\n", 6081 bdevname(rdev->bdev,b), mdname(mddev)); 6082 err = -EINVAL; 6083 goto abort_export; 6084 } 6085 6086 if (mddev_is_clustered(mddev)) 6087 md_cluster_ops->metadata_update_start(mddev); 6088 clear_bit(In_sync, &rdev->flags); 6089 rdev->desc_nr = -1; 6090 rdev->saved_raid_disk = -1; 6091 err = bind_rdev_to_array(rdev, mddev); 6092 if (err) 6093 goto abort_clustered; 6094 6095 /* 6096 * The rest should better be atomic, we can have disk failures 6097 * noticed in interrupt contexts ... 6098 */ 6099 6100 rdev->raid_disk = -1; 6101 6102 md_update_sb(mddev, 1); 6103 6104 if (mddev_is_clustered(mddev)) 6105 md_cluster_ops->metadata_update_finish(mddev); 6106 /* 6107 * Kick recovery, maybe this spare has to be added to the 6108 * array immediately. 6109 */ 6110 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6111 md_wakeup_thread(mddev->thread); 6112 md_new_event(mddev); 6113 return 0; 6114 6115 abort_clustered: 6116 if (mddev_is_clustered(mddev)) 6117 md_cluster_ops->metadata_update_cancel(mddev); 6118 abort_export: 6119 export_rdev(rdev); 6120 return err; 6121 } 6122 6123 static int set_bitmap_file(struct mddev *mddev, int fd) 6124 { 6125 int err = 0; 6126 6127 if (mddev->pers) { 6128 if (!mddev->pers->quiesce || !mddev->thread) 6129 return -EBUSY; 6130 if (mddev->recovery || mddev->sync_thread) 6131 return -EBUSY; 6132 /* we should be able to change the bitmap.. 
*/ 6133 } 6134 6135 if (fd >= 0) { 6136 struct inode *inode; 6137 struct file *f; 6138 6139 if (mddev->bitmap || mddev->bitmap_info.file) 6140 return -EEXIST; /* cannot add when bitmap is present */ 6141 f = fget(fd); 6142 6143 if (f == NULL) { 6144 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 6145 mdname(mddev)); 6146 return -EBADF; 6147 } 6148 6149 inode = f->f_mapping->host; 6150 if (!S_ISREG(inode->i_mode)) { 6151 printk(KERN_ERR "%s: error: bitmap file must be a regular file\n", 6152 mdname(mddev)); 6153 err = -EBADF; 6154 } else if (!(f->f_mode & FMODE_WRITE)) { 6155 printk(KERN_ERR "%s: error: bitmap file must open for write\n", 6156 mdname(mddev)); 6157 err = -EBADF; 6158 } else if (atomic_read(&inode->i_writecount) != 1) { 6159 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 6160 mdname(mddev)); 6161 err = -EBUSY; 6162 } 6163 if (err) { 6164 fput(f); 6165 return err; 6166 } 6167 mddev->bitmap_info.file = f; 6168 mddev->bitmap_info.offset = 0; /* file overrides offset */ 6169 } else if (mddev->bitmap == NULL) 6170 return -ENOENT; /* cannot remove what isn't there */ 6171 err = 0; 6172 if (mddev->pers) { 6173 mddev->pers->quiesce(mddev, 1); 6174 if (fd >= 0) { 6175 struct bitmap *bitmap; 6176 6177 bitmap = bitmap_create(mddev, -1); 6178 if (!IS_ERR(bitmap)) { 6179 mddev->bitmap = bitmap; 6180 err = bitmap_load(mddev); 6181 } else 6182 err = PTR_ERR(bitmap); 6183 } 6184 if (fd < 0 || err) { 6185 bitmap_destroy(mddev); 6186 fd = -1; /* make sure to put the file */ 6187 } 6188 mddev->pers->quiesce(mddev, 0); 6189 } 6190 if (fd < 0) { 6191 struct file *f = mddev->bitmap_info.file; 6192 if (f) { 6193 spin_lock(&mddev->lock); 6194 mddev->bitmap_info.file = NULL; 6195 spin_unlock(&mddev->lock); 6196 fput(f); 6197 } 6198 } 6199 6200 return err; 6201 } 6202 6203 /* 6204 * set_array_info is used two different ways 6205 * The original usage is when creating a new array. 6206 * In this usage, raid_disks is > 0 and it together with 6207 * level, size, not_persistent,layout,chunksize determine the 6208 * shape of the array. 6209 * This will always create an array with a type-0.90.0 superblock. 6210 * The newer usage is when assembling an array. 6211 * In this case raid_disks will be 0, and the major_version field is 6212 * use to determine which style super-blocks are to be found on the devices. 6213 * The minor and patch _version numbers are also kept incase the 6214 * super_block handler wishes to interpret them. 6215 */ 6216 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) 6217 { 6218 6219 if (info->raid_disks == 0) { 6220 /* just setting version number for superblock loading */ 6221 if (info->major_version < 0 || 6222 info->major_version >= ARRAY_SIZE(super_types) || 6223 super_types[info->major_version].name == NULL) { 6224 /* maybe try to auto-load a module? */ 6225 printk(KERN_INFO 6226 "md: superblock version %d not known\n", 6227 info->major_version); 6228 return -EINVAL; 6229 } 6230 mddev->major_version = info->major_version; 6231 mddev->minor_version = info->minor_version; 6232 mddev->patch_version = info->patch_version; 6233 mddev->persistent = !info->not_persistent; 6234 /* ensure mddev_put doesn't delete this now that there 6235 * is some minimal configuration. 
6236 */ 6237 mddev->ctime = get_seconds(); 6238 return 0; 6239 } 6240 mddev->major_version = MD_MAJOR_VERSION; 6241 mddev->minor_version = MD_MINOR_VERSION; 6242 mddev->patch_version = MD_PATCHLEVEL_VERSION; 6243 mddev->ctime = get_seconds(); 6244 6245 mddev->level = info->level; 6246 mddev->clevel[0] = 0; 6247 mddev->dev_sectors = 2 * (sector_t)info->size; 6248 mddev->raid_disks = info->raid_disks; 6249 /* don't set md_minor, it is determined by which /dev/md* was 6250 * openned 6251 */ 6252 if (info->state & (1<<MD_SB_CLEAN)) 6253 mddev->recovery_cp = MaxSector; 6254 else 6255 mddev->recovery_cp = 0; 6256 mddev->persistent = ! info->not_persistent; 6257 mddev->external = 0; 6258 6259 mddev->layout = info->layout; 6260 mddev->chunk_sectors = info->chunk_size >> 9; 6261 6262 mddev->max_disks = MD_SB_DISKS; 6263 6264 if (mddev->persistent) 6265 mddev->flags = 0; 6266 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6267 6268 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 6269 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 6270 mddev->bitmap_info.offset = 0; 6271 6272 mddev->reshape_position = MaxSector; 6273 6274 /* 6275 * Generate a 128 bit UUID 6276 */ 6277 get_random_bytes(mddev->uuid, 16); 6278 6279 mddev->new_level = mddev->level; 6280 mddev->new_chunk_sectors = mddev->chunk_sectors; 6281 mddev->new_layout = mddev->layout; 6282 mddev->delta_disks = 0; 6283 mddev->reshape_backwards = 0; 6284 6285 return 0; 6286 } 6287 6288 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 6289 { 6290 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 6291 6292 if (mddev->external_size) 6293 return; 6294 6295 mddev->array_sectors = array_sectors; 6296 } 6297 EXPORT_SYMBOL(md_set_array_sectors); 6298 6299 static int update_size(struct mddev *mddev, sector_t num_sectors) 6300 { 6301 struct md_rdev *rdev; 6302 int rv; 6303 int fit = (num_sectors == 0); 6304 6305 if (mddev->pers->resize == NULL) 6306 return -EINVAL; 6307 /* The "num_sectors" is the number of sectors of each device that 6308 * is used. This can only make sense for arrays with redundancy. 6309 * linear and raid0 always use whatever space is available. We can only 6310 * consider changing this number if no resync or reconstruction is 6311 * happening, and if the new size is acceptable. It must fit before the 6312 * sb_start or, if that is <data_offset, it must fit before the size 6313 * of each device. If num_sectors is zero, we find the largest size 6314 * that fits. 
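 * (Illustrative, hedged example: with members whose usable sizes are
 * 1000 GiB, 1000 GiB and 500 GiB, num_sectors == 0 settles on the 500 GiB
 * figure, the largest value every rdev->sectors in the loop below can
 * satisfy; asking for more than the smallest member returns -ENOSPC.)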
6315 */ 6316 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6317 mddev->sync_thread) 6318 return -EBUSY; 6319 if (mddev->ro) 6320 return -EROFS; 6321 6322 rdev_for_each(rdev, mddev) { 6323 sector_t avail = rdev->sectors; 6324 6325 if (fit && (num_sectors == 0 || num_sectors > avail)) 6326 num_sectors = avail; 6327 if (avail < num_sectors) 6328 return -ENOSPC; 6329 } 6330 rv = mddev->pers->resize(mddev, num_sectors); 6331 if (!rv) 6332 revalidate_disk(mddev->gendisk); 6333 return rv; 6334 } 6335 6336 static int update_raid_disks(struct mddev *mddev, int raid_disks) 6337 { 6338 int rv; 6339 struct md_rdev *rdev; 6340 /* change the number of raid disks */ 6341 if (mddev->pers->check_reshape == NULL) 6342 return -EINVAL; 6343 if (mddev->ro) 6344 return -EROFS; 6345 if (raid_disks <= 0 || 6346 (mddev->max_disks && raid_disks >= mddev->max_disks)) 6347 return -EINVAL; 6348 if (mddev->sync_thread || 6349 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6350 mddev->reshape_position != MaxSector) 6351 return -EBUSY; 6352 6353 rdev_for_each(rdev, mddev) { 6354 if (mddev->raid_disks < raid_disks && 6355 rdev->data_offset < rdev->new_data_offset) 6356 return -EINVAL; 6357 if (mddev->raid_disks > raid_disks && 6358 rdev->data_offset > rdev->new_data_offset) 6359 return -EINVAL; 6360 } 6361 6362 mddev->delta_disks = raid_disks - mddev->raid_disks; 6363 if (mddev->delta_disks < 0) 6364 mddev->reshape_backwards = 1; 6365 else if (mddev->delta_disks > 0) 6366 mddev->reshape_backwards = 0; 6367 6368 rv = mddev->pers->check_reshape(mddev); 6369 if (rv < 0) { 6370 mddev->delta_disks = 0; 6371 mddev->reshape_backwards = 0; 6372 } 6373 return rv; 6374 } 6375 6376 /* 6377 * update_array_info is used to change the configuration of an 6378 * on-line array. 6379 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 6380 * fields in the info are checked against the array. 6381 * Any differences that cannot be handled will cause an error. 6382 * Normally, only one change can be managed at a time. 6383 */ 6384 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 6385 { 6386 int rv = 0; 6387 int cnt = 0; 6388 int state = 0; 6389 6390 /* calculate expected state,ignoring low bits */ 6391 if (mddev->bitmap && mddev->bitmap_info.offset) 6392 state |= (1 << MD_SB_BITMAP_PRESENT); 6393 6394 if (mddev->major_version != info->major_version || 6395 mddev->minor_version != info->minor_version || 6396 /* mddev->patch_version != info->patch_version || */ 6397 mddev->ctime != info->ctime || 6398 mddev->level != info->level || 6399 /* mddev->layout != info->layout || */ 6400 mddev->persistent != !info->not_persistent || 6401 mddev->chunk_sectors != info->chunk_size >> 9 || 6402 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 6403 ((state^info->state) & 0xfffffe00) 6404 ) 6405 return -EINVAL; 6406 /* Check there is only one change */ 6407 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6408 cnt++; 6409 if (mddev->raid_disks != info->raid_disks) 6410 cnt++; 6411 if (mddev->layout != info->layout) 6412 cnt++; 6413 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 6414 cnt++; 6415 if (cnt == 0) 6416 return 0; 6417 if (cnt > 1) 6418 return -EINVAL; 6419 6420 if (mddev->layout != info->layout) { 6421 /* Change layout 6422 * we don't need to do anything at the md level, the 6423 * personality will take care of it all. 
6424 */ 6425 if (mddev->pers->check_reshape == NULL) 6426 return -EINVAL; 6427 else { 6428 mddev->new_layout = info->layout; 6429 rv = mddev->pers->check_reshape(mddev); 6430 if (rv) 6431 mddev->new_layout = mddev->layout; 6432 return rv; 6433 } 6434 } 6435 if (mddev_is_clustered(mddev)) 6436 md_cluster_ops->metadata_update_start(mddev); 6437 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6438 rv = update_size(mddev, (sector_t)info->size * 2); 6439 6440 if (mddev->raid_disks != info->raid_disks) 6441 rv = update_raid_disks(mddev, info->raid_disks); 6442 6443 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 6444 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 6445 rv = -EINVAL; 6446 goto err; 6447 } 6448 if (mddev->recovery || mddev->sync_thread) { 6449 rv = -EBUSY; 6450 goto err; 6451 } 6452 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 6453 struct bitmap *bitmap; 6454 /* add the bitmap */ 6455 if (mddev->bitmap) { 6456 rv = -EEXIST; 6457 goto err; 6458 } 6459 if (mddev->bitmap_info.default_offset == 0) { 6460 rv = -EINVAL; 6461 goto err; 6462 } 6463 mddev->bitmap_info.offset = 6464 mddev->bitmap_info.default_offset; 6465 mddev->bitmap_info.space = 6466 mddev->bitmap_info.default_space; 6467 mddev->pers->quiesce(mddev, 1); 6468 bitmap = bitmap_create(mddev, -1); 6469 if (!IS_ERR(bitmap)) { 6470 mddev->bitmap = bitmap; 6471 rv = bitmap_load(mddev); 6472 } else 6473 rv = PTR_ERR(bitmap); 6474 if (rv) 6475 bitmap_destroy(mddev); 6476 mddev->pers->quiesce(mddev, 0); 6477 } else { 6478 /* remove the bitmap */ 6479 if (!mddev->bitmap) { 6480 rv = -ENOENT; 6481 goto err; 6482 } 6483 if (mddev->bitmap->storage.file) { 6484 rv = -EINVAL; 6485 goto err; 6486 } 6487 mddev->pers->quiesce(mddev, 1); 6488 bitmap_destroy(mddev); 6489 mddev->pers->quiesce(mddev, 0); 6490 mddev->bitmap_info.offset = 0; 6491 } 6492 } 6493 md_update_sb(mddev, 1); 6494 if (mddev_is_clustered(mddev)) 6495 md_cluster_ops->metadata_update_finish(mddev); 6496 return rv; 6497 err: 6498 if (mddev_is_clustered(mddev)) 6499 md_cluster_ops->metadata_update_cancel(mddev); 6500 return rv; 6501 } 6502 6503 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 6504 { 6505 struct md_rdev *rdev; 6506 int err = 0; 6507 6508 if (mddev->pers == NULL) 6509 return -ENODEV; 6510 6511 rcu_read_lock(); 6512 rdev = find_rdev_rcu(mddev, dev); 6513 if (!rdev) 6514 err = -ENODEV; 6515 else { 6516 md_error(mddev, rdev); 6517 if (!test_bit(Faulty, &rdev->flags)) 6518 err = -EBUSY; 6519 } 6520 rcu_read_unlock(); 6521 return err; 6522 } 6523 6524 /* 6525 * We have a problem here : there is no easy way to give a CHS 6526 * virtual geometry. We currently pretend that we have a 2 heads 6527 * 4 sectors (with a BIG number of cylinders...). This drives 6528 * dosfs just mad... 
;-) 6529 */ 6530 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 6531 { 6532 struct mddev *mddev = bdev->bd_disk->private_data; 6533 6534 geo->heads = 2; 6535 geo->sectors = 4; 6536 geo->cylinders = mddev->array_sectors / 8; 6537 return 0; 6538 } 6539 6540 static inline bool md_ioctl_valid(unsigned int cmd) 6541 { 6542 switch (cmd) { 6543 case ADD_NEW_DISK: 6544 case BLKROSET: 6545 case GET_ARRAY_INFO: 6546 case GET_BITMAP_FILE: 6547 case GET_DISK_INFO: 6548 case HOT_ADD_DISK: 6549 case HOT_REMOVE_DISK: 6550 case RAID_AUTORUN: 6551 case RAID_VERSION: 6552 case RESTART_ARRAY_RW: 6553 case RUN_ARRAY: 6554 case SET_ARRAY_INFO: 6555 case SET_BITMAP_FILE: 6556 case SET_DISK_FAULTY: 6557 case STOP_ARRAY: 6558 case STOP_ARRAY_RO: 6559 case CLUSTERED_DISK_NACK: 6560 return true; 6561 default: 6562 return false; 6563 } 6564 } 6565 6566 static int md_ioctl(struct block_device *bdev, fmode_t mode, 6567 unsigned int cmd, unsigned long arg) 6568 { 6569 int err = 0; 6570 void __user *argp = (void __user *)arg; 6571 struct mddev *mddev = NULL; 6572 int ro; 6573 6574 if (!md_ioctl_valid(cmd)) 6575 return -ENOTTY; 6576 6577 switch (cmd) { 6578 case RAID_VERSION: 6579 case GET_ARRAY_INFO: 6580 case GET_DISK_INFO: 6581 break; 6582 default: 6583 if (!capable(CAP_SYS_ADMIN)) 6584 return -EACCES; 6585 } 6586 6587 /* 6588 * Commands dealing with the RAID driver but not any 6589 * particular array: 6590 */ 6591 switch (cmd) { 6592 case RAID_VERSION: 6593 err = get_version(argp); 6594 goto out; 6595 6596 #ifndef MODULE 6597 case RAID_AUTORUN: 6598 err = 0; 6599 autostart_arrays(arg); 6600 goto out; 6601 #endif 6602 default:; 6603 } 6604 6605 /* 6606 * Commands creating/starting a new array: 6607 */ 6608 6609 mddev = bdev->bd_disk->private_data; 6610 6611 if (!mddev) { 6612 BUG(); 6613 goto out; 6614 } 6615 6616 /* Some actions do not requires the mutex */ 6617 switch (cmd) { 6618 case GET_ARRAY_INFO: 6619 if (!mddev->raid_disks && !mddev->external) 6620 err = -ENODEV; 6621 else 6622 err = get_array_info(mddev, argp); 6623 goto out; 6624 6625 case GET_DISK_INFO: 6626 if (!mddev->raid_disks && !mddev->external) 6627 err = -ENODEV; 6628 else 6629 err = get_disk_info(mddev, argp); 6630 goto out; 6631 6632 case SET_DISK_FAULTY: 6633 err = set_disk_faulty(mddev, new_decode_dev(arg)); 6634 goto out; 6635 6636 case GET_BITMAP_FILE: 6637 err = get_bitmap_file(mddev, argp); 6638 goto out; 6639 6640 } 6641 6642 if (cmd == ADD_NEW_DISK) 6643 /* need to ensure md_delayed_delete() has completed */ 6644 flush_workqueue(md_misc_wq); 6645 6646 if (cmd == HOT_REMOVE_DISK) 6647 /* need to ensure recovery thread has run */ 6648 wait_event_interruptible_timeout(mddev->sb_wait, 6649 !test_bit(MD_RECOVERY_NEEDED, 6650 &mddev->flags), 6651 msecs_to_jiffies(5000)); 6652 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 6653 /* Need to flush page cache, and ensure no-one else opens 6654 * and writes 6655 */ 6656 mutex_lock(&mddev->open_mutex); 6657 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 6658 mutex_unlock(&mddev->open_mutex); 6659 err = -EBUSY; 6660 goto out; 6661 } 6662 set_bit(MD_STILL_CLOSED, &mddev->flags); 6663 mutex_unlock(&mddev->open_mutex); 6664 sync_blockdev(bdev); 6665 } 6666 err = mddev_lock(mddev); 6667 if (err) { 6668 printk(KERN_INFO 6669 "md: ioctl lock interrupted, reason %d, cmd %d\n", 6670 err, cmd); 6671 goto out; 6672 } 6673 6674 if (cmd == SET_ARRAY_INFO) { 6675 mdu_array_info_t info; 6676 if (!arg) 6677 memset(&info, 0, sizeof(info)); 6678 else if (copy_from_user(&info, argp, 
sizeof(info))) { 6679 err = -EFAULT; 6680 goto unlock; 6681 } 6682 if (mddev->pers) { 6683 err = update_array_info(mddev, &info); 6684 if (err) { 6685 printk(KERN_WARNING "md: couldn't update" 6686 " array info. %d\n", err); 6687 goto unlock; 6688 } 6689 goto unlock; 6690 } 6691 if (!list_empty(&mddev->disks)) { 6692 printk(KERN_WARNING 6693 "md: array %s already has disks!\n", 6694 mdname(mddev)); 6695 err = -EBUSY; 6696 goto unlock; 6697 } 6698 if (mddev->raid_disks) { 6699 printk(KERN_WARNING 6700 "md: array %s already initialised!\n", 6701 mdname(mddev)); 6702 err = -EBUSY; 6703 goto unlock; 6704 } 6705 err = set_array_info(mddev, &info); 6706 if (err) { 6707 printk(KERN_WARNING "md: couldn't set" 6708 " array info. %d\n", err); 6709 goto unlock; 6710 } 6711 goto unlock; 6712 } 6713 6714 /* 6715 * Commands querying/configuring an existing array: 6716 */ 6717 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 6718 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 6719 if ((!mddev->raid_disks && !mddev->external) 6720 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 6721 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 6722 && cmd != GET_BITMAP_FILE) { 6723 err = -ENODEV; 6724 goto unlock; 6725 } 6726 6727 /* 6728 * Commands even a read-only array can execute: 6729 */ 6730 switch (cmd) { 6731 case RESTART_ARRAY_RW: 6732 err = restart_array(mddev); 6733 goto unlock; 6734 6735 case STOP_ARRAY: 6736 err = do_md_stop(mddev, 0, bdev); 6737 goto unlock; 6738 6739 case STOP_ARRAY_RO: 6740 err = md_set_readonly(mddev, bdev); 6741 goto unlock; 6742 6743 case HOT_REMOVE_DISK: 6744 err = hot_remove_disk(mddev, new_decode_dev(arg)); 6745 goto unlock; 6746 6747 case ADD_NEW_DISK: 6748 /* We can support ADD_NEW_DISK on read-only arrays 6749 * only if we are re-adding a preexisting device. 6750 * So require mddev->pers and MD_DISK_SYNC. 6751 */ 6752 if (mddev->pers) { 6753 mdu_disk_info_t info; 6754 if (copy_from_user(&info, argp, sizeof(info))) 6755 err = -EFAULT; 6756 else if (!(info.state & (1<<MD_DISK_SYNC))) 6757 /* Need to clear read-only for this */ 6758 break; 6759 else 6760 err = add_new_disk(mddev, &info); 6761 goto unlock; 6762 } 6763 break; 6764 6765 case BLKROSET: 6766 if (get_user(ro, (int __user *)(arg))) { 6767 err = -EFAULT; 6768 goto unlock; 6769 } 6770 err = -EINVAL; 6771 6772 /* if the bdev is going readonly the value of mddev->ro 6773 * does not matter, no writes are coming 6774 */ 6775 if (ro) 6776 goto unlock; 6777 6778 /* are we already prepared for writes? */ 6779 if (mddev->ro != 1) 6780 goto unlock; 6781 6782 /* transitioning to readauto need only happen for 6783 * arrays that call md_write_start 6784 */ 6785 if (mddev->pers) { 6786 err = restart_array(mddev); 6787 if (err == 0) { 6788 mddev->ro = 2; 6789 set_disk_ro(mddev->gendisk, 0); 6790 } 6791 } 6792 goto unlock; 6793 } 6794 6795 /* 6796 * The remaining ioctls are changing the state of the 6797 * superblock, so we do not allow them on read-only arrays. 6798 */ 6799 if (mddev->ro && mddev->pers) { 6800 if (mddev->ro == 2) { 6801 mddev->ro = 0; 6802 sysfs_notify_dirent_safe(mddev->sysfs_state); 6803 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6804 /* mddev_unlock will wake thread */ 6805 /* If a device failed while we were read-only, we 6806 * need to make sure the metadata is updated now.
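 * (The wait below blocks until both MD_CHANGE_DEVS and MD_CHANGE_PENDING
 * clear, then retakes the lock with mddev_lock_nointr.)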
6807 */ 6808 if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 6809 mddev_unlock(mddev); 6810 wait_event(mddev->sb_wait, 6811 !test_bit(MD_CHANGE_DEVS, &mddev->flags) && 6812 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6813 mddev_lock_nointr(mddev); 6814 } 6815 } else { 6816 err = -EROFS; 6817 goto unlock; 6818 } 6819 } 6820 6821 switch (cmd) { 6822 case ADD_NEW_DISK: 6823 { 6824 mdu_disk_info_t info; 6825 if (copy_from_user(&info, argp, sizeof(info))) 6826 err = -EFAULT; 6827 else 6828 err = add_new_disk(mddev, &info); 6829 goto unlock; 6830 } 6831 6832 case CLUSTERED_DISK_NACK: 6833 if (mddev_is_clustered(mddev)) 6834 md_cluster_ops->new_disk_ack(mddev, false); 6835 else 6836 err = -EINVAL; 6837 goto unlock; 6838 6839 case HOT_ADD_DISK: 6840 err = hot_add_disk(mddev, new_decode_dev(arg)); 6841 goto unlock; 6842 6843 case RUN_ARRAY: 6844 err = do_md_run(mddev); 6845 goto unlock; 6846 6847 case SET_BITMAP_FILE: 6848 err = set_bitmap_file(mddev, (int)arg); 6849 goto unlock; 6850 6851 default: 6852 err = -EINVAL; 6853 goto unlock; 6854 } 6855 6856 unlock: 6857 if (mddev->hold_active == UNTIL_IOCTL && 6858 err != -EINVAL) 6859 mddev->hold_active = 0; 6860 mddev_unlock(mddev); 6861 out: 6862 return err; 6863 } 6864 #ifdef CONFIG_COMPAT 6865 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 6866 unsigned int cmd, unsigned long arg) 6867 { 6868 switch (cmd) { 6869 case HOT_REMOVE_DISK: 6870 case HOT_ADD_DISK: 6871 case SET_DISK_FAULTY: 6872 case SET_BITMAP_FILE: 6873 /* These take in integer arg, do not convert */ 6874 break; 6875 default: 6876 arg = (unsigned long)compat_ptr(arg); 6877 break; 6878 } 6879 6880 return md_ioctl(bdev, mode, cmd, arg); 6881 } 6882 #endif /* CONFIG_COMPAT */ 6883 6884 static int md_open(struct block_device *bdev, fmode_t mode) 6885 { 6886 /* 6887 * Succeed if we can lock the mddev, which confirms that 6888 * it isn't being stopped right now. 6889 */ 6890 struct mddev *mddev = mddev_find(bdev->bd_dev); 6891 int err; 6892 6893 if (!mddev) 6894 return -ENODEV; 6895 6896 if (mddev->gendisk != bdev->bd_disk) { 6897 /* we are racing with mddev_put which is discarding this 6898 * bd_disk. 
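 * (mddev_put defers the final delete to md_misc_wq, as noted for
 * md_delayed_delete in md_ioctl above, so flushing that workqueue below
 * ensures bd_disk is fully gone before the open is retried.)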
6899 */ 6900 mddev_put(mddev); 6901 /* Wait until bdev->bd_disk is definitely gone */ 6902 flush_workqueue(md_misc_wq); 6903 /* Then retry the open from the top */ 6904 return -ERESTARTSYS; 6905 } 6906 BUG_ON(mddev != bdev->bd_disk->private_data); 6907 6908 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 6909 goto out; 6910 6911 err = 0; 6912 atomic_inc(&mddev->openers); 6913 clear_bit(MD_STILL_CLOSED, &mddev->flags); 6914 mutex_unlock(&mddev->open_mutex); 6915 6916 check_disk_change(bdev); 6917 out: 6918 return err; 6919 } 6920 6921 static void md_release(struct gendisk *disk, fmode_t mode) 6922 { 6923 struct mddev *mddev = disk->private_data; 6924 6925 BUG_ON(!mddev); 6926 atomic_dec(&mddev->openers); 6927 mddev_put(mddev); 6928 } 6929 6930 static int md_media_changed(struct gendisk *disk) 6931 { 6932 struct mddev *mddev = disk->private_data; 6933 6934 return mddev->changed; 6935 } 6936 6937 static int md_revalidate(struct gendisk *disk) 6938 { 6939 struct mddev *mddev = disk->private_data; 6940 6941 mddev->changed = 0; 6942 return 0; 6943 } 6944 static const struct block_device_operations md_fops = 6945 { 6946 .owner = THIS_MODULE, 6947 .open = md_open, 6948 .release = md_release, 6949 .ioctl = md_ioctl, 6950 #ifdef CONFIG_COMPAT 6951 .compat_ioctl = md_compat_ioctl, 6952 #endif 6953 .getgeo = md_getgeo, 6954 .media_changed = md_media_changed, 6955 .revalidate_disk= md_revalidate, 6956 }; 6957 6958 static int md_thread(void *arg) 6959 { 6960 struct md_thread *thread = arg; 6961 6962 /* 6963 * md_thread is a 'system-thread', it's priority should be very 6964 * high. We avoid resource deadlocks individually in each 6965 * raid personality. (RAID5 does preallocation) We also use RR and 6966 * the very same RT priority as kswapd, thus we will never get 6967 * into a priority inversion deadlock. 6968 * 6969 * we definitely have to have equal or higher priority than 6970 * bdflush, otherwise bdflush will deadlock if there are too 6971 * many dirty RAID5 blocks. 6972 */ 6973 6974 allow_signal(SIGKILL); 6975 while (!kthread_should_stop()) { 6976 6977 /* We need to wait INTERRUPTIBLE so that 6978 * we don't add to the load-average. 
6979 * That means we need to be sure no signals are 6980 * pending 6981 */ 6982 if (signal_pending(current)) 6983 flush_signals(current); 6984 6985 wait_event_interruptible_timeout 6986 (thread->wqueue, 6987 test_bit(THREAD_WAKEUP, &thread->flags) 6988 || kthread_should_stop(), 6989 thread->timeout); 6990 6991 clear_bit(THREAD_WAKEUP, &thread->flags); 6992 if (!kthread_should_stop()) 6993 thread->run(thread); 6994 } 6995 6996 return 0; 6997 } 6998 6999 void md_wakeup_thread(struct md_thread *thread) 7000 { 7001 if (thread) { 7002 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7003 set_bit(THREAD_WAKEUP, &thread->flags); 7004 wake_up(&thread->wqueue); 7005 } 7006 } 7007 EXPORT_SYMBOL(md_wakeup_thread); 7008 7009 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7010 struct mddev *mddev, const char *name) 7011 { 7012 struct md_thread *thread; 7013 7014 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7015 if (!thread) 7016 return NULL; 7017 7018 init_waitqueue_head(&thread->wqueue); 7019 7020 thread->run = run; 7021 thread->mddev = mddev; 7022 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7023 thread->tsk = kthread_run(md_thread, thread, 7024 "%s_%s", 7025 mdname(thread->mddev), 7026 name); 7027 if (IS_ERR(thread->tsk)) { 7028 kfree(thread); 7029 return NULL; 7030 } 7031 return thread; 7032 } 7033 EXPORT_SYMBOL(md_register_thread); 7034 7035 void md_unregister_thread(struct md_thread **threadp) 7036 { 7037 struct md_thread *thread = *threadp; 7038 if (!thread) 7039 return; 7040 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7041 /* Locking ensures that mddev_unlock does not wake_up a 7042 * non-existent thread 7043 */ 7044 spin_lock(&pers_lock); 7045 *threadp = NULL; 7046 spin_unlock(&pers_lock); 7047 7048 kthread_stop(thread->tsk); 7049 kfree(thread); 7050 } 7051 EXPORT_SYMBOL(md_unregister_thread); 7052 7053 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7054 { 7055 if (!rdev || test_bit(Faulty, &rdev->flags)) 7056 return; 7057 7058 if (!mddev->pers || !mddev->pers->error_handler) 7059 return; 7060 mddev->pers->error_handler(mddev,rdev); 7061 if (mddev->degraded) 7062 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7063 sysfs_notify_dirent_safe(rdev->sysfs_state); 7064 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7065 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7066 md_wakeup_thread(mddev->thread); 7067 if (mddev->event_work.func) 7068 queue_work(md_misc_wq, &mddev->event_work); 7069 md_new_event_inintr(mddev); 7070 } 7071 EXPORT_SYMBOL(md_error); 7072 7073 /* seq_file implementation /proc/mdstat */ 7074 7075 static void status_unused(struct seq_file *seq) 7076 { 7077 int i = 0; 7078 struct md_rdev *rdev; 7079 7080 seq_printf(seq, "unused devices: "); 7081 7082 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7083 char b[BDEVNAME_SIZE]; 7084 i++; 7085 seq_printf(seq, "%s ", 7086 bdevname(rdev->bdev,b)); 7087 } 7088 if (!i) 7089 seq_printf(seq, "<none>"); 7090 7091 seq_printf(seq, "\n"); 7092 } 7093 7094 static void status_resync(struct seq_file *seq, struct mddev *mddev) 7095 { 7096 sector_t max_sectors, resync, res; 7097 unsigned long dt, db; 7098 sector_t rt; 7099 int scale; 7100 unsigned int per_milli; 7101 7102 if (mddev->curr_resync <= 3) 7103 resync = 0; 7104 else 7105 resync = mddev->curr_resync 7106 - atomic_read(&mddev->recovery_active); 7107 7108 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 7109 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7110 max_sectors = mddev->resync_max_sectors; 
7111 else 7112 max_sectors = mddev->dev_sectors; 7113 7114 WARN_ON(max_sectors == 0); 7115 /* Pick 'scale' such that (resync>>scale)*1000 will fit 7116 * in a sector_t, and (max_sectors>>scale) will fit in a 7117 * u32, as those are the requirements for sector_div. 7118 * Thus 'scale' must be at least 10 7119 */ 7120 scale = 10; 7121 if (sizeof(sector_t) > sizeof(unsigned long)) { 7122 while ( max_sectors/2 > (1ULL<<(scale+32))) 7123 scale++; 7124 } 7125 res = (resync>>scale)*1000; 7126 sector_div(res, (u32)((max_sectors>>scale)+1)); 7127 7128 per_milli = res; 7129 { 7130 int i, x = per_milli/50, y = 20-x; 7131 seq_printf(seq, "["); 7132 for (i = 0; i < x; i++) 7133 seq_printf(seq, "="); 7134 seq_printf(seq, ">"); 7135 for (i = 0; i < y; i++) 7136 seq_printf(seq, "."); 7137 seq_printf(seq, "] "); 7138 } 7139 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 7140 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 7141 "reshape" : 7142 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 7143 "check" : 7144 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 7145 "resync" : "recovery"))), 7146 per_milli/10, per_milli % 10, 7147 (unsigned long long) resync/2, 7148 (unsigned long long) max_sectors/2); 7149 7150 /* 7151 * dt: time from mark until now 7152 * db: blocks written from mark until now 7153 * rt: remaining time 7154 * 7155 * rt is a sector_t, so could be 32bit or 64bit. 7156 * So we divide before multiply in case it is 32bit and close 7157 * to the limit. 7158 * We scale the divisor (db) by 32 to avoid losing precision 7159 * near the end of resync when the number of remaining sectors 7160 * is close to 'db'. 7161 * We then divide rt by 32 after multiplying by db to compensate. 7162 * The '+1' avoids division by zero if db is very small. 7163 */ 7164 dt = ((jiffies - mddev->resync_mark) / HZ); 7165 if (!dt) dt++; 7166 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 7167 - mddev->resync_mark_cnt; 7168 7169 rt = max_sectors - resync; /* number of remaining sectors */ 7170 sector_div(rt, db/32+1); 7171 rt *= dt; 7172 rt >>= 5; 7173 7174 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 7175 ((unsigned long)rt % 60)/6); 7176 7177 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 7178 } 7179 7180 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 7181 { 7182 struct list_head *tmp; 7183 loff_t l = *pos; 7184 struct mddev *mddev; 7185 7186 if (l >= 0x10000) 7187 return NULL; 7188 if (!l--) 7189 /* header */ 7190 return (void*)1; 7191 7192 spin_lock(&all_mddevs_lock); 7193 list_for_each(tmp,&all_mddevs) 7194 if (!l--) { 7195 mddev = list_entry(tmp, struct mddev, all_mddevs); 7196 mddev_get(mddev); 7197 spin_unlock(&all_mddevs_lock); 7198 return mddev; 7199 } 7200 spin_unlock(&all_mddevs_lock); 7201 if (!l--) 7202 return (void*)2;/* tail */ 7203 return NULL; 7204 } 7205 7206 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 7207 { 7208 struct list_head *tmp; 7209 struct mddev *next_mddev, *mddev = v; 7210 7211 ++*pos; 7212 if (v == (void*)2) 7213 return NULL; 7214 7215 spin_lock(&all_mddevs_lock); 7216 if (v == (void*)1) 7217 tmp = all_mddevs.next; 7218 else 7219 tmp = mddev->all_mddevs.next; 7220 if (tmp != &all_mddevs) 7221 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 7222 else { 7223 next_mddev = (void*)2; 7224 *pos = 0x10000; 7225 } 7226 spin_unlock(&all_mddevs_lock); 7227 7228 if (v != (void*)1) 7229 mddev_put(mddev); 7230 return next_mddev; 7231 7232 } 7233 7234 static void md_seq_stop(struct seq_file *seq, void *v) 7235 { 7236 
struct mddev *mddev = v; 7237 7238 if (mddev && v != (void*)1 && v != (void*)2) 7239 mddev_put(mddev); 7240 } 7241 7242 static int md_seq_show(struct seq_file *seq, void *v) 7243 { 7244 struct mddev *mddev = v; 7245 sector_t sectors; 7246 struct md_rdev *rdev; 7247 7248 if (v == (void*)1) { 7249 struct md_personality *pers; 7250 seq_printf(seq, "Personalities : "); 7251 spin_lock(&pers_lock); 7252 list_for_each_entry(pers, &pers_list, list) 7253 seq_printf(seq, "[%s] ", pers->name); 7254 7255 spin_unlock(&pers_lock); 7256 seq_printf(seq, "\n"); 7257 seq->poll_event = atomic_read(&md_event_count); 7258 return 0; 7259 } 7260 if (v == (void*)2) { 7261 status_unused(seq); 7262 return 0; 7263 } 7264 7265 spin_lock(&mddev->lock); 7266 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 7267 seq_printf(seq, "%s : %sactive", mdname(mddev), 7268 mddev->pers ? "" : "in"); 7269 if (mddev->pers) { 7270 if (mddev->ro==1) 7271 seq_printf(seq, " (read-only)"); 7272 if (mddev->ro==2) 7273 seq_printf(seq, " (auto-read-only)"); 7274 seq_printf(seq, " %s", mddev->pers->name); 7275 } 7276 7277 sectors = 0; 7278 rcu_read_lock(); 7279 rdev_for_each_rcu(rdev, mddev) { 7280 char b[BDEVNAME_SIZE]; 7281 seq_printf(seq, " %s[%d]", 7282 bdevname(rdev->bdev,b), rdev->desc_nr); 7283 if (test_bit(WriteMostly, &rdev->flags)) 7284 seq_printf(seq, "(W)"); 7285 if (test_bit(Faulty, &rdev->flags)) { 7286 seq_printf(seq, "(F)"); 7287 continue; 7288 } 7289 if (rdev->raid_disk < 0) 7290 seq_printf(seq, "(S)"); /* spare */ 7291 if (test_bit(Replacement, &rdev->flags)) 7292 seq_printf(seq, "(R)"); 7293 sectors += rdev->sectors; 7294 } 7295 rcu_read_unlock(); 7296 7297 if (!list_empty(&mddev->disks)) { 7298 if (mddev->pers) 7299 seq_printf(seq, "\n %llu blocks", 7300 (unsigned long long) 7301 mddev->array_sectors / 2); 7302 else 7303 seq_printf(seq, "\n %llu blocks", 7304 (unsigned long long)sectors / 2); 7305 } 7306 if (mddev->persistent) { 7307 if (mddev->major_version != 0 || 7308 mddev->minor_version != 90) { 7309 seq_printf(seq," super %d.%d", 7310 mddev->major_version, 7311 mddev->minor_version); 7312 } 7313 } else if (mddev->external) 7314 seq_printf(seq, " super external:%s", 7315 mddev->metadata_type); 7316 else 7317 seq_printf(seq, " super non-persistent"); 7318 7319 if (mddev->pers) { 7320 mddev->pers->status(seq, mddev); 7321 seq_printf(seq, "\n "); 7322 if (mddev->pers->sync_request) { 7323 if (mddev->curr_resync > 2) { 7324 status_resync(seq, mddev); 7325 seq_printf(seq, "\n "); 7326 } else if (mddev->curr_resync >= 1) 7327 seq_printf(seq, "\tresync=DELAYED\n "); 7328 else if (mddev->recovery_cp < MaxSector) 7329 seq_printf(seq, "\tresync=PENDING\n "); 7330 } 7331 } else 7332 seq_printf(seq, "\n "); 7333 7334 bitmap_status(seq, mddev->bitmap); 7335 7336 seq_printf(seq, "\n"); 7337 } 7338 spin_unlock(&mddev->lock); 7339 7340 return 0; 7341 } 7342 7343 static const struct seq_operations md_seq_ops = { 7344 .start = md_seq_start, 7345 .next = md_seq_next, 7346 .stop = md_seq_stop, 7347 .show = md_seq_show, 7348 }; 7349 7350 static int md_seq_open(struct inode *inode, struct file *file) 7351 { 7352 struct seq_file *seq; 7353 int error; 7354 7355 error = seq_open(file, &md_seq_ops); 7356 if (error) 7357 return error; 7358 7359 seq = file->private_data; 7360 seq->poll_event = atomic_read(&md_event_count); 7361 return error; 7362 } 7363 7364 static int md_unloading; 7365 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 7366 { 7367 struct seq_file *seq = filp->private_data; 7368 int 
mask; 7369 7370 if (md_unloading) 7371 return POLLIN|POLLRDNORM|POLLERR|POLLPRI; 7372 poll_wait(filp, &md_event_waiters, wait); 7373 7374 /* always allow read */ 7375 mask = POLLIN | POLLRDNORM; 7376 7377 if (seq->poll_event != atomic_read(&md_event_count)) 7378 mask |= POLLERR | POLLPRI; 7379 return mask; 7380 } 7381 7382 static const struct file_operations md_seq_fops = { 7383 .owner = THIS_MODULE, 7384 .open = md_seq_open, 7385 .read = seq_read, 7386 .llseek = seq_lseek, 7387 .release = seq_release_private, 7388 .poll = mdstat_poll, 7389 }; 7390 7391 int register_md_personality(struct md_personality *p) 7392 { 7393 printk(KERN_INFO "md: %s personality registered for level %d\n", 7394 p->name, p->level); 7395 spin_lock(&pers_lock); 7396 list_add_tail(&p->list, &pers_list); 7397 spin_unlock(&pers_lock); 7398 return 0; 7399 } 7400 EXPORT_SYMBOL(register_md_personality); 7401 7402 int unregister_md_personality(struct md_personality *p) 7403 { 7404 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 7405 spin_lock(&pers_lock); 7406 list_del_init(&p->list); 7407 spin_unlock(&pers_lock); 7408 return 0; 7409 } 7410 EXPORT_SYMBOL(unregister_md_personality); 7411 7412 int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module) 7413 { 7414 if (md_cluster_ops != NULL) 7415 return -EALREADY; 7416 spin_lock(&pers_lock); 7417 md_cluster_ops = ops; 7418 md_cluster_mod = module; 7419 spin_unlock(&pers_lock); 7420 return 0; 7421 } 7422 EXPORT_SYMBOL(register_md_cluster_operations); 7423 7424 int unregister_md_cluster_operations(void) 7425 { 7426 spin_lock(&pers_lock); 7427 md_cluster_ops = NULL; 7428 spin_unlock(&pers_lock); 7429 return 0; 7430 } 7431 EXPORT_SYMBOL(unregister_md_cluster_operations); 7432 7433 int md_setup_cluster(struct mddev *mddev, int nodes) 7434 { 7435 int err; 7436 7437 err = request_module("md-cluster"); 7438 if (err) { 7439 pr_err("md-cluster module not found.\n"); 7440 return err; 7441 } 7442 7443 spin_lock(&pers_lock); 7444 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 7445 spin_unlock(&pers_lock); 7446 return -ENOENT; 7447 } 7448 spin_unlock(&pers_lock); 7449 7450 return md_cluster_ops->join(mddev, nodes); 7451 } 7452 7453 void md_cluster_stop(struct mddev *mddev) 7454 { 7455 if (!md_cluster_ops) 7456 return; 7457 md_cluster_ops->leave(mddev); 7458 module_put(md_cluster_mod); 7459 } 7460 7461 static int is_mddev_idle(struct mddev *mddev, int init) 7462 { 7463 struct md_rdev *rdev; 7464 int idle; 7465 int curr_events; 7466 7467 idle = 1; 7468 rcu_read_lock(); 7469 rdev_for_each_rcu(rdev, mddev) { 7470 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 7471 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 7472 (int)part_stat_read(&disk->part0, sectors[1]) - 7473 atomic_read(&disk->sync_io); 7474 /* sync IO will cause sync_io to increase before the disk_stats 7475 * as sync_io is counted when a request starts, and 7476 * disk_stats is counted when it completes. 7477 * So resync activity will cause curr_events to be smaller than 7478 * when there was no such activity. 7479 * non-sync IO will cause disk_stat to increase without 7480 * increasing sync_io so curr_events will (eventually) 7481 * be larger than it was before. Once it becomes 7482 * substantially larger, the test below will cause 7483 * the array to appear non-idle, and resync will slow 7484 * down. 
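 * (The test below treats growth of more than 64 sectors in curr_events
 * since the previous sample as activity; an 'init' call simply reseeds
 * rdev->last_events and its return value is ignored by the caller.)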
7485 * If there is a lot of outstanding resync activity when 7486 * we set last_event to curr_events, then all that activity 7487 * completing might cause the array to appear non-idle 7488 * and resync will be slowed down even though there might 7489 * not have been non-resync activity. This will only 7490 * happen once though. 'last_events' will soon reflect 7491 * the state where there is little or no outstanding 7492 * resync requests, and further resync activity will 7493 * always make curr_events less than last_events. 7494 * 7495 */ 7496 if (init || curr_events - rdev->last_events > 64) { 7497 rdev->last_events = curr_events; 7498 idle = 0; 7499 } 7500 } 7501 rcu_read_unlock(); 7502 return idle; 7503 } 7504 7505 void md_done_sync(struct mddev *mddev, int blocks, int ok) 7506 { 7507 /* another "blocks" (512byte) blocks have been synced */ 7508 atomic_sub(blocks, &mddev->recovery_active); 7509 wake_up(&mddev->recovery_wait); 7510 if (!ok) { 7511 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7512 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 7513 md_wakeup_thread(mddev->thread); 7514 // stop recovery, signal do_sync .... 7515 } 7516 } 7517 EXPORT_SYMBOL(md_done_sync); 7518 7519 /* md_write_start(mddev, bi) 7520 * If we need to update some array metadata (e.g. 'active' flag 7521 * in superblock) before writing, schedule a superblock update 7522 * and wait for it to complete. 7523 */ 7524 void md_write_start(struct mddev *mddev, struct bio *bi) 7525 { 7526 int did_change = 0; 7527 if (bio_data_dir(bi) != WRITE) 7528 return; 7529 7530 BUG_ON(mddev->ro == 1); 7531 if (mddev->ro == 2) { 7532 /* need to switch to read/write */ 7533 mddev->ro = 0; 7534 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7535 md_wakeup_thread(mddev->thread); 7536 md_wakeup_thread(mddev->sync_thread); 7537 did_change = 1; 7538 } 7539 atomic_inc(&mddev->writes_pending); 7540 if (mddev->safemode == 1) 7541 mddev->safemode = 0; 7542 if (mddev->in_sync) { 7543 spin_lock(&mddev->lock); 7544 if (mddev->in_sync) { 7545 mddev->in_sync = 0; 7546 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7547 set_bit(MD_CHANGE_PENDING, &mddev->flags); 7548 md_wakeup_thread(mddev->thread); 7549 did_change = 1; 7550 } 7551 spin_unlock(&mddev->lock); 7552 } 7553 if (did_change) 7554 sysfs_notify_dirent_safe(mddev->sysfs_state); 7555 wait_event(mddev->sb_wait, 7556 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 7557 } 7558 EXPORT_SYMBOL(md_write_start); 7559 7560 void md_write_end(struct mddev *mddev) 7561 { 7562 if (atomic_dec_and_test(&mddev->writes_pending)) { 7563 if (mddev->safemode == 2) 7564 md_wakeup_thread(mddev->thread); 7565 else if (mddev->safemode_delay) 7566 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 7567 } 7568 } 7569 EXPORT_SYMBOL(md_write_end); 7570 7571 /* md_allow_write(mddev) 7572 * Calling this ensures that the array is marked 'active' so that writes 7573 * may proceed without blocking. It is important to call this before 7574 * attempting a GFP_KERNEL allocation while holding the mddev lock. 7575 * Must be called with mddev_lock held. 7576 * 7577 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock 7578 * is dropped, so return -EAGAIN after notifying userspace. 
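 * A hedged usage sketch (the caller context and variable names here are
 * illustrative, not taken from this file):
 *
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;	(-EAGAIN: userspace must flush external metadata)
 *	ptr = kmalloc(len, GFP_KERNEL);	(safe: array already marked active)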
7579 */ 7580 int md_allow_write(struct mddev *mddev) 7581 { 7582 if (!mddev->pers) 7583 return 0; 7584 if (mddev->ro) 7585 return 0; 7586 if (!mddev->pers->sync_request) 7587 return 0; 7588 7589 spin_lock(&mddev->lock); 7590 if (mddev->in_sync) { 7591 mddev->in_sync = 0; 7592 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7593 set_bit(MD_CHANGE_PENDING, &mddev->flags); 7594 if (mddev->safemode_delay && 7595 mddev->safemode == 0) 7596 mddev->safemode = 1; 7597 spin_unlock(&mddev->lock); 7598 if (mddev_is_clustered(mddev)) 7599 md_cluster_ops->metadata_update_start(mddev); 7600 md_update_sb(mddev, 0); 7601 if (mddev_is_clustered(mddev)) 7602 md_cluster_ops->metadata_update_finish(mddev); 7603 sysfs_notify_dirent_safe(mddev->sysfs_state); 7604 } else 7605 spin_unlock(&mddev->lock); 7606 7607 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 7608 return -EAGAIN; 7609 else 7610 return 0; 7611 } 7612 EXPORT_SYMBOL_GPL(md_allow_write); 7613 7614 #define SYNC_MARKS 10 7615 #define SYNC_MARK_STEP (3*HZ) 7616 #define UPDATE_FREQUENCY (5*60*HZ) 7617 void md_do_sync(struct md_thread *thread) 7618 { 7619 struct mddev *mddev = thread->mddev; 7620 struct mddev *mddev2; 7621 unsigned int currspeed = 0, 7622 window; 7623 sector_t max_sectors,j, io_sectors, recovery_done; 7624 unsigned long mark[SYNC_MARKS]; 7625 unsigned long update_time; 7626 sector_t mark_cnt[SYNC_MARKS]; 7627 int last_mark,m; 7628 struct list_head *tmp; 7629 sector_t last_check; 7630 int skipped = 0; 7631 struct md_rdev *rdev; 7632 char *desc, *action = NULL; 7633 struct blk_plug plug; 7634 7635 /* just in case thread restarts... */ 7636 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7637 return; 7638 if (mddev->ro) {/* never try to sync a read-only array */ 7639 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7640 return; 7641 } 7642 7643 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7644 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 7645 desc = "data-check"; 7646 action = "check"; 7647 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7648 desc = "requested-resync"; 7649 action = "repair"; 7650 } else 7651 desc = "resync"; 7652 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7653 desc = "reshape"; 7654 else 7655 desc = "recovery"; 7656 7657 mddev->last_sync_action = action ?: desc; 7658 7659 /* we overload curr_resync somewhat here. 7660 * 0 == not engaged in resync at all 7661 * 2 == checking that there is no conflict with another sync 7662 * 1 == like 2, but have yielded to allow conflicting resync to 7663 * commence 7664 * other == active in resync - this many blocks 7665 * 7666 * Before starting a resync we must have set curr_resync to 7667 * 2, and then checked that every "conflicting" array has curr_resync 7668 * less than ours. When we find one that is the same or higher 7669 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 7670 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 7671 * This will mean we have to start checking from the beginning again.
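 * (Illustrative case: if two arrays sharing a physical disk both reach
 * curr_resync == 2, the mddev with the lower address yields by dropping to 1,
 * waits on resync_wait, and re-checks from try_again when it is woken.)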
7672 * 7673 */ 7674 7675 do { 7676 mddev->curr_resync = 2; 7677 7678 try_again: 7679 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7680 goto skip; 7681 for_each_mddev(mddev2, tmp) { 7682 if (mddev2 == mddev) 7683 continue; 7684 if (!mddev->parallel_resync 7685 && mddev2->curr_resync 7686 && match_mddev_units(mddev, mddev2)) { 7687 DEFINE_WAIT(wq); 7688 if (mddev < mddev2 && mddev->curr_resync == 2) { 7689 /* arbitrarily yield */ 7690 mddev->curr_resync = 1; 7691 wake_up(&resync_wait); 7692 } 7693 if (mddev > mddev2 && mddev->curr_resync == 1) 7694 /* no need to wait here, we can wait the next 7695 * time 'round when curr_resync == 2 7696 */ 7697 continue; 7698 /* We need to wait 'interruptible' so as not to 7699 * contribute to the load average, and not to 7700 * be caught by 'softlockup' 7701 */ 7702 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7703 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7704 mddev2->curr_resync >= mddev->curr_resync) { 7705 printk(KERN_INFO "md: delaying %s of %s" 7706 " until %s has finished (they" 7707 " share one or more physical units)\n", 7708 desc, mdname(mddev), mdname(mddev2)); 7709 mddev_put(mddev2); 7710 if (signal_pending(current)) 7711 flush_signals(current); 7712 schedule(); 7713 finish_wait(&resync_wait, &wq); 7714 goto try_again; 7715 } 7716 finish_wait(&resync_wait, &wq); 7717 } 7718 } 7719 } while (mddev->curr_resync < 2); 7720 7721 j = 0; 7722 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7723 /* resync follows the size requested by the personality, 7724 * which defaults to physical size, but can be virtual size 7725 */ 7726 max_sectors = mddev->resync_max_sectors; 7727 atomic64_set(&mddev->resync_mismatches, 0); 7728 /* we don't use the checkpoint if there's a bitmap */ 7729 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7730 j = mddev->resync_min; 7731 else if (!mddev->bitmap) 7732 j = mddev->recovery_cp; 7733 7734 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7735 max_sectors = mddev->resync_max_sectors; 7736 else { 7737 /* recovery follows the physical size of devices */ 7738 max_sectors = mddev->dev_sectors; 7739 j = MaxSector; 7740 rcu_read_lock(); 7741 rdev_for_each_rcu(rdev, mddev) 7742 if (rdev->raid_disk >= 0 && 7743 !test_bit(Faulty, &rdev->flags) && 7744 !test_bit(In_sync, &rdev->flags) && 7745 rdev->recovery_offset < j) 7746 j = rdev->recovery_offset; 7747 rcu_read_unlock(); 7748 7749 /* If there is a bitmap, we need to make sure all 7750 * writes that started before we added a spare 7751 * complete before we start doing a recovery. 7752 * Otherwise the write might complete and (via 7753 * bitmap_endwrite) set a bit in the bitmap after the 7754 * recovery has checked that bit and skipped that 7755 * region. 
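 * (The quiesce pair just below drains any such in-flight writes before
 * recovery starts scanning the bitmap.)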
7756 */ 7757 if (mddev->bitmap) { 7758 mddev->pers->quiesce(mddev, 1); 7759 mddev->pers->quiesce(mddev, 0); 7760 } 7761 } 7762 7763 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7764 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 7765 " %d KB/sec/disk.\n", speed_min(mddev)); 7766 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 7767 "(but not more than %d KB/sec) for %s.\n", 7768 speed_max(mddev), desc); 7769 7770 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 7771 7772 io_sectors = 0; 7773 for (m = 0; m < SYNC_MARKS; m++) { 7774 mark[m] = jiffies; 7775 mark_cnt[m] = io_sectors; 7776 } 7777 last_mark = 0; 7778 mddev->resync_mark = mark[last_mark]; 7779 mddev->resync_mark_cnt = mark_cnt[last_mark]; 7780 7781 /* 7782 * Tune reconstruction: 7783 */ 7784 window = 32*(PAGE_SIZE/512); 7785 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 7786 window/2, (unsigned long long)max_sectors/2); 7787 7788 atomic_set(&mddev->recovery_active, 0); 7789 last_check = 0; 7790 7791 if (j>2) { 7792 printk(KERN_INFO 7793 "md: resuming %s of %s from checkpoint.\n", 7794 desc, mdname(mddev)); 7795 mddev->curr_resync = j; 7796 } else 7797 mddev->curr_resync = 3; /* no longer delayed */ 7798 mddev->curr_resync_completed = j; 7799 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7800 md_new_event(mddev); 7801 update_time = jiffies; 7802 7803 if (mddev_is_clustered(mddev)) 7804 md_cluster_ops->resync_start(mddev, j, max_sectors); 7805 7806 blk_start_plug(&plug); 7807 while (j < max_sectors) { 7808 sector_t sectors; 7809 7810 skipped = 0; 7811 7812 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7813 ((mddev->curr_resync > mddev->curr_resync_completed && 7814 (mddev->curr_resync - mddev->curr_resync_completed) 7815 > (max_sectors >> 4)) || 7816 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 7817 (j - mddev->curr_resync_completed)*2 7818 >= mddev->resync_max - mddev->curr_resync_completed 7819 )) { 7820 /* time to update curr_resync_completed */ 7821 wait_event(mddev->recovery_wait, 7822 atomic_read(&mddev->recovery_active) == 0); 7823 mddev->curr_resync_completed = j; 7824 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 7825 j > mddev->recovery_cp) 7826 mddev->recovery_cp = j; 7827 update_time = jiffies; 7828 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7829 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7830 } 7831 7832 while (j >= mddev->resync_max && 7833 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7834 /* As this condition is controlled by user-space, 7835 * we can block indefinitely, so use '_interruptible' 7836 * to avoid triggering warnings. 
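 * (resync_max is set from user space, typically through the md/sync_max
 * sysfs attribute, hence the interruptible wait below.)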
7837 */ 7838 flush_signals(current); /* just in case */ 7839 wait_event_interruptible(mddev->recovery_wait, 7840 mddev->resync_max > j 7841 || test_bit(MD_RECOVERY_INTR, 7842 &mddev->recovery)); 7843 } 7844 7845 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7846 break; 7847 7848 sectors = mddev->pers->sync_request(mddev, j, &skipped); 7849 if (sectors == 0) { 7850 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7851 break; 7852 } 7853 7854 if (!skipped) { /* actual IO requested */ 7855 io_sectors += sectors; 7856 atomic_add(sectors, &mddev->recovery_active); 7857 } 7858 7859 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7860 break; 7861 7862 j += sectors; 7863 if (j > 2) 7864 mddev->curr_resync = j; 7865 if (mddev_is_clustered(mddev)) 7866 md_cluster_ops->resync_info_update(mddev, j, max_sectors); 7867 mddev->curr_mark_cnt = io_sectors; 7868 if (last_check == 0) 7869 /* this is the earliest that rebuild will be 7870 * visible in /proc/mdstat 7871 */ 7872 md_new_event(mddev); 7873 7874 if (last_check + window > io_sectors || j == max_sectors) 7875 continue; 7876 7877 last_check = io_sectors; 7878 repeat: 7879 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 7880 /* step marks */ 7881 int next = (last_mark+1) % SYNC_MARKS; 7882 7883 mddev->resync_mark = mark[next]; 7884 mddev->resync_mark_cnt = mark_cnt[next]; 7885 mark[next] = jiffies; 7886 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 7887 last_mark = next; 7888 } 7889 7890 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7891 break; 7892 7893 /* 7894 * this loop exits only when we are slower than 7895 * the 'hard' speed limit, or the system was IO-idle for 7896 * a jiffy. 7897 * the system might be non-idle CPU-wise, but we only care 7898 * about not overloading the IO subsystem. (things like an 7899 * e2fsck being done on the RAID array should execute fast) 7900 */ 7901 cond_resched(); 7902 7903 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 7904 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 7905 /((jiffies-mddev->resync_mark)/HZ +1) +1; 7906 7907 if (currspeed > speed_min(mddev)) { 7908 if (currspeed > speed_max(mddev)) { 7909 msleep(500); 7910 goto repeat; 7911 } 7912 if (!is_mddev_idle(mddev, 0)) { 7913 /* 7914 * Give other IO more of a chance. 7915 * The faster the devices, the less we wait. 7916 */ 7917 wait_event(mddev->recovery_wait, 7918 !atomic_read(&mddev->recovery_active)); 7919 } 7920 } 7921 } 7922 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, 7923 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 7924 ?
"interrupted" : "done"); 7925 /* 7926 * this also signals 'finished resyncing' to md_stop 7927 */ 7928 blk_finish_plug(&plug); 7929 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 7930 7931 /* tell personality that we are finished */ 7932 mddev->pers->sync_request(mddev, max_sectors, &skipped); 7933 7934 if (mddev_is_clustered(mddev)) 7935 md_cluster_ops->resync_finish(mddev); 7936 7937 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 7938 mddev->curr_resync > 2) { 7939 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7940 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7941 if (mddev->curr_resync >= mddev->recovery_cp) { 7942 printk(KERN_INFO 7943 "md: checkpointing %s of %s.\n", 7944 desc, mdname(mddev)); 7945 if (test_bit(MD_RECOVERY_ERROR, 7946 &mddev->recovery)) 7947 mddev->recovery_cp = 7948 mddev->curr_resync_completed; 7949 else 7950 mddev->recovery_cp = 7951 mddev->curr_resync; 7952 } 7953 } else 7954 mddev->recovery_cp = MaxSector; 7955 } else { 7956 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7957 mddev->curr_resync = MaxSector; 7958 rcu_read_lock(); 7959 rdev_for_each_rcu(rdev, mddev) 7960 if (rdev->raid_disk >= 0 && 7961 mddev->delta_disks >= 0 && 7962 !test_bit(Faulty, &rdev->flags) && 7963 !test_bit(In_sync, &rdev->flags) && 7964 rdev->recovery_offset < mddev->curr_resync) 7965 rdev->recovery_offset = mddev->curr_resync; 7966 rcu_read_unlock(); 7967 } 7968 } 7969 skip: 7970 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7971 7972 spin_lock(&mddev->lock); 7973 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7974 /* We completed so min/max setting can be forgotten if used. */ 7975 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7976 mddev->resync_min = 0; 7977 mddev->resync_max = MaxSector; 7978 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7979 mddev->resync_min = mddev->curr_resync_completed; 7980 mddev->curr_resync = 0; 7981 spin_unlock(&mddev->lock); 7982 7983 wake_up(&resync_wait); 7984 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7985 md_wakeup_thread(mddev->thread); 7986 return; 7987 } 7988 EXPORT_SYMBOL_GPL(md_do_sync); 7989 7990 static int remove_and_add_spares(struct mddev *mddev, 7991 struct md_rdev *this) 7992 { 7993 struct md_rdev *rdev; 7994 int spares = 0; 7995 int removed = 0; 7996 7997 rdev_for_each(rdev, mddev) 7998 if ((this == NULL || rdev == this) && 7999 rdev->raid_disk >= 0 && 8000 !test_bit(Blocked, &rdev->flags) && 8001 (test_bit(Faulty, &rdev->flags) || 8002 ! test_bit(In_sync, &rdev->flags)) && 8003 atomic_read(&rdev->nr_pending)==0) { 8004 if (mddev->pers->hot_remove_disk( 8005 mddev, rdev) == 0) { 8006 sysfs_unlink_rdev(mddev, rdev); 8007 rdev->raid_disk = -1; 8008 removed++; 8009 } 8010 } 8011 if (removed && mddev->kobj.sd) 8012 sysfs_notify(&mddev->kobj, NULL, "degraded"); 8013 8014 if (this) 8015 goto no_add; 8016 8017 rdev_for_each(rdev, mddev) { 8018 if (rdev->raid_disk >= 0 && 8019 !test_bit(In_sync, &rdev->flags) && 8020 !test_bit(Faulty, &rdev->flags)) 8021 spares++; 8022 if (rdev->raid_disk >= 0) 8023 continue; 8024 if (test_bit(Faulty, &rdev->flags)) 8025 continue; 8026 if (mddev->ro && 8027 ! 
(rdev->saved_raid_disk >= 0 &&
8028 !test_bit(Bitmap_sync, &rdev->flags)))
8029 continue;
8030
8031 if (rdev->saved_raid_disk < 0)
8032 rdev->recovery_offset = 0;
8033 if (mddev->pers->
8034 hot_add_disk(mddev, rdev) == 0) {
8035 if (sysfs_link_rdev(mddev, rdev))
8036 /* failure here is OK */;
8037 spares++;
8038 md_new_event(mddev);
8039 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8040 }
8041 }
8042 no_add:
8043 if (removed)
8044 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8045 return spares;
8046 }
8047
8048 static void md_start_sync(struct work_struct *ws)
8049 {
8050 struct mddev *mddev = container_of(ws, struct mddev, del_work);
8051
8052 mddev->sync_thread = md_register_thread(md_do_sync,
8053 mddev,
8054 "resync");
8055 if (!mddev->sync_thread) {
8056 printk(KERN_ERR "%s: could not start resync"
8057 " thread...\n",
8058 mdname(mddev));
8059 /* leave the spares where they are, it shouldn't hurt */
8060 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8061 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8062 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8063 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8064 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8065 wake_up(&resync_wait);
8066 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8067 &mddev->recovery))
8068 if (mddev->sysfs_action)
8069 sysfs_notify_dirent_safe(mddev->sysfs_action);
8070 } else
8071 md_wakeup_thread(mddev->sync_thread);
8072 sysfs_notify_dirent_safe(mddev->sysfs_action);
8073 md_new_event(mddev);
8074 }
8075
8076 /*
8077 * This routine is regularly called by all per-raid-array threads to
8078 * deal with generic issues like resync and super-block update.
8079 * Raid personalities that don't have a thread (linear/raid0) do not
8080 * need this as they never do any recovery or update the superblock.
8081 *
8082 * It does not do any resync itself, but rather "forks" off other threads
8083 * to do that as needed.
8084 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8085 * "->recovery" and create a thread at ->sync_thread.
8086 * When the thread finishes it sets MD_RECOVERY_DONE
8087 * and wakes up this thread, which will reap the thread and finish up.
8088 * This thread also removes any faulty devices (with nr_pending == 0).
8089 *
8090 * The overall approach is:
8091 * 1/ if the superblock needs updating, update it.
8092 * 2/ If a recovery thread is running, don't do anything else.
8093 * 3/ If recovery has finished, clean up, possibly marking spares active.
8094 * 4/ If there are any faulty devices, remove them.
8095 * 5/ If the array is degraded, try to add spare devices.
8096 * 6/ If the array has spares or is not in-sync, start a resync thread.
8097 */
8098 void md_check_recovery(struct mddev *mddev)
8099 {
8100 if (mddev->suspended)
8101 return;
8102
8103 if (mddev->bitmap)
8104 bitmap_daemon_work(mddev);
8105
8106 if (signal_pending(current)) {
8107 if (mddev->pers->sync_request && !mddev->external) {
8108 printk(KERN_INFO "md: %s in immediate safe mode\n",
8109 mdname(mddev));
8110 mddev->safemode = 2;
8111 }
8112 flush_signals(current);
8113 }
8114
8115 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8116 return;
8117 if ( ! (
8118 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
8119 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8120 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8121 (mddev->external == 0 && mddev->safemode == 1) ||
8122 (mddev->safemode == 2 && !
atomic_read(&mddev->writes_pending) 8123 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 8124 )) 8125 return; 8126 8127 if (mddev_trylock(mddev)) { 8128 int spares = 0; 8129 8130 if (mddev->ro) { 8131 struct md_rdev *rdev; 8132 if (!mddev->external && mddev->in_sync) 8133 /* 'Blocked' flag not needed as failed devices 8134 * will be recorded if array switched to read/write. 8135 * Leaving it set will prevent the device 8136 * from being removed. 8137 */ 8138 rdev_for_each(rdev, mddev) 8139 clear_bit(Blocked, &rdev->flags); 8140 /* On a read-only array we can: 8141 * - remove failed devices 8142 * - add already-in_sync devices if the array itself 8143 * is in-sync. 8144 * As we only add devices that are already in-sync, 8145 * we can activate the spares immediately. 8146 */ 8147 remove_and_add_spares(mddev, NULL); 8148 /* There is no thread, but we need to call 8149 * ->spare_active and clear saved_raid_disk 8150 */ 8151 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8152 md_reap_sync_thread(mddev); 8153 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8154 goto unlock; 8155 } 8156 8157 if (!mddev->external) { 8158 int did_change = 0; 8159 spin_lock(&mddev->lock); 8160 if (mddev->safemode && 8161 !atomic_read(&mddev->writes_pending) && 8162 !mddev->in_sync && 8163 mddev->recovery_cp == MaxSector) { 8164 mddev->in_sync = 1; 8165 did_change = 1; 8166 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 8167 } 8168 if (mddev->safemode == 1) 8169 mddev->safemode = 0; 8170 spin_unlock(&mddev->lock); 8171 if (did_change) 8172 sysfs_notify_dirent_safe(mddev->sysfs_state); 8173 } 8174 8175 if (mddev->flags & MD_UPDATE_SB_FLAGS) { 8176 if (mddev_is_clustered(mddev)) 8177 md_cluster_ops->metadata_update_start(mddev); 8178 md_update_sb(mddev, 0); 8179 if (mddev_is_clustered(mddev)) 8180 md_cluster_ops->metadata_update_finish(mddev); 8181 } 8182 8183 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 8184 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 8185 /* resync/recovery still happening */ 8186 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8187 goto unlock; 8188 } 8189 if (mddev->sync_thread) { 8190 md_reap_sync_thread(mddev); 8191 goto unlock; 8192 } 8193 /* Set RUNNING before clearing NEEDED to avoid 8194 * any transients in the value of "sync_action". 8195 */ 8196 mddev->curr_resync_completed = 0; 8197 spin_lock(&mddev->lock); 8198 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8199 spin_unlock(&mddev->lock); 8200 /* Clear some bits that don't mean anything, but 8201 * might be left set 8202 */ 8203 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 8204 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 8205 8206 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 8207 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 8208 goto not_running; 8209 /* no recovery is running. 8210 * remove any failed drives, then 8211 * add spares if possible. 8212 * Spares are also removed and re-added, to allow 8213 * the personality to fail the re-add. 
8214 */ 8215 8216 if (mddev->reshape_position != MaxSector) { 8217 if (mddev->pers->check_reshape == NULL || 8218 mddev->pers->check_reshape(mddev) != 0) 8219 /* Cannot proceed */ 8220 goto not_running; 8221 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8222 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8223 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 8224 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8225 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8226 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8227 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8228 } else if (mddev->recovery_cp < MaxSector) { 8229 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8230 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8231 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 8232 /* nothing to be done ... */ 8233 goto not_running; 8234 8235 if (mddev->pers->sync_request) { 8236 if (spares) { 8237 /* We are adding a device or devices to an array 8238 * which has the bitmap stored on all devices. 8239 * So make sure all bitmap pages get written 8240 */ 8241 bitmap_write_all(mddev->bitmap); 8242 } 8243 INIT_WORK(&mddev->del_work, md_start_sync); 8244 queue_work(md_misc_wq, &mddev->del_work); 8245 goto unlock; 8246 } 8247 not_running: 8248 if (!mddev->sync_thread) { 8249 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8250 wake_up(&resync_wait); 8251 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 8252 &mddev->recovery)) 8253 if (mddev->sysfs_action) 8254 sysfs_notify_dirent_safe(mddev->sysfs_action); 8255 } 8256 unlock: 8257 wake_up(&mddev->sb_wait); 8258 mddev_unlock(mddev); 8259 } 8260 } 8261 EXPORT_SYMBOL(md_check_recovery); 8262 8263 void md_reap_sync_thread(struct mddev *mddev) 8264 { 8265 struct md_rdev *rdev; 8266 8267 /* resync has finished, collect result */ 8268 md_unregister_thread(&mddev->sync_thread); 8269 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8270 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8271 /* success...*/ 8272 /* activate any spares */ 8273 if (mddev->pers->spare_active(mddev)) { 8274 sysfs_notify(&mddev->kobj, NULL, 8275 "degraded"); 8276 set_bit(MD_CHANGE_DEVS, &mddev->flags); 8277 } 8278 } 8279 if (mddev_is_clustered(mddev)) 8280 md_cluster_ops->metadata_update_start(mddev); 8281 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8282 mddev->pers->finish_reshape) 8283 mddev->pers->finish_reshape(mddev); 8284 8285 /* If array is no-longer degraded, then any saved_raid_disk 8286 * information must be scrapped. 
8287 */
8288 if (!mddev->degraded)
8289 rdev_for_each(rdev, mddev)
8290 rdev->saved_raid_disk = -1;
8291
8292 md_update_sb(mddev, 1);
8293 if (mddev_is_clustered(mddev))
8294 md_cluster_ops->metadata_update_finish(mddev);
8295 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8296 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8297 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8298 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8299 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8300 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8301 wake_up(&resync_wait);
8302 /* flag recovery needed just to double check */
8303 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8304 sysfs_notify_dirent_safe(mddev->sysfs_action);
8305 md_new_event(mddev);
8306 if (mddev->event_work.func)
8307 queue_work(md_misc_wq, &mddev->event_work);
8308 }
8309 EXPORT_SYMBOL(md_reap_sync_thread);
8310
8311 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8312 {
8313 sysfs_notify_dirent_safe(rdev->sysfs_state);
8314 wait_event_timeout(rdev->blocked_wait,
8315 !test_bit(Blocked, &rdev->flags) &&
8316 !test_bit(BlockedBadBlocks, &rdev->flags),
8317 msecs_to_jiffies(5000));
8318 rdev_dec_pending(rdev, mddev);
8319 }
8320 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8321
8322 void md_finish_reshape(struct mddev *mddev)
8323 {
8324 /* called by personality module when reshape completes. */
8325 struct md_rdev *rdev;
8326
8327 rdev_for_each(rdev, mddev) {
8328 if (rdev->data_offset > rdev->new_data_offset)
8329 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8330 else
8331 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8332 rdev->data_offset = rdev->new_data_offset;
8333 }
8334 }
8335 EXPORT_SYMBOL(md_finish_reshape);
8336
8337 /* Bad block management.
8338 * We can record which blocks on each device are 'bad' and so just
8339 * fail those blocks, or that stripe, rather than the whole device.
8340 * Entries in the bad-block table are 64 bits wide. This comprises:
8341 * Length of bad-range, in sectors: 0-511 for lengths 1-512
8342 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
8343 * A 'shift' can be set so that larger blocks are tracked and
8344 * consequently larger devices can be covered.
8345 * 'Acknowledged' flag - 1 bit - the most significant bit.
8346 *
8347 * Locking of the bad-block table uses a seqlock so md_is_badblock
8348 * might need to retry if it is very unlucky.
8349 * We will sometimes want to check for bad blocks in a bi_end_io function,
8350 * so we use the write_seqlock_irq variant.
8351 *
8352 * When looking for a bad block we specify a range and want to
8353 * know if any block in the range is bad. So we binary-search
8354 * to the last range that starts at-or-before the given endpoint,
8355 * (or "before the sector after the target range")
8356 * then see if it ends after the given start.
8357 * We return
8358 * 0 if there are no known bad blocks in the range
8359 * 1 if there are known bad blocks which are all acknowledged
8360 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
8361 * plus the start/length of the first bad section we overlap.
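*
* For illustration only (a sketch; the BB_* macros themselves live in md.h,
* and the exact bit positions below are an assumption consistent with the
* layout described above): with shift == 0, an acknowledged 16-sector bad
* range starting at sector 5000 could be packed as
*
*	u64 entry = ((u64)5000 << 9) | (16 - 1) | (1ULL << 63);
*
* i.e. (length - 1) in the low 9 bits, the start sector in the next 54 bits,
* and the 'acknowledged' flag in the most significant bit, so that
* BB_OFFSET(entry), BB_LEN(entry) and BB_ACK(entry) recover 5000, 16 and 1.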
8362 */ 8363 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, 8364 sector_t *first_bad, int *bad_sectors) 8365 { 8366 int hi; 8367 int lo; 8368 u64 *p = bb->page; 8369 int rv; 8370 sector_t target = s + sectors; 8371 unsigned seq; 8372 8373 if (bb->shift > 0) { 8374 /* round the start down, and the end up */ 8375 s >>= bb->shift; 8376 target += (1<<bb->shift) - 1; 8377 target >>= bb->shift; 8378 sectors = target - s; 8379 } 8380 /* 'target' is now the first block after the bad range */ 8381 8382 retry: 8383 seq = read_seqbegin(&bb->lock); 8384 lo = 0; 8385 rv = 0; 8386 hi = bb->count; 8387 8388 /* Binary search between lo and hi for 'target' 8389 * i.e. for the last range that starts before 'target' 8390 */ 8391 /* INVARIANT: ranges before 'lo' and at-or-after 'hi' 8392 * are known not to be the last range before target. 8393 * VARIANT: hi-lo is the number of possible 8394 * ranges, and decreases until it reaches 1 8395 */ 8396 while (hi - lo > 1) { 8397 int mid = (lo + hi) / 2; 8398 sector_t a = BB_OFFSET(p[mid]); 8399 if (a < target) 8400 /* This could still be the one, earlier ranges 8401 * could not. */ 8402 lo = mid; 8403 else 8404 /* This and later ranges are definitely out. */ 8405 hi = mid; 8406 } 8407 /* 'lo' might be the last that started before target, but 'hi' isn't */ 8408 if (hi > lo) { 8409 /* need to check all range that end after 's' to see if 8410 * any are unacknowledged. 8411 */ 8412 while (lo >= 0 && 8413 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) { 8414 if (BB_OFFSET(p[lo]) < target) { 8415 /* starts before the end, and finishes after 8416 * the start, so they must overlap 8417 */ 8418 if (rv != -1 && BB_ACK(p[lo])) 8419 rv = 1; 8420 else 8421 rv = -1; 8422 *first_bad = BB_OFFSET(p[lo]); 8423 *bad_sectors = BB_LEN(p[lo]); 8424 } 8425 lo--; 8426 } 8427 } 8428 8429 if (read_seqretry(&bb->lock, seq)) 8430 goto retry; 8431 8432 return rv; 8433 } 8434 EXPORT_SYMBOL_GPL(md_is_badblock); 8435 8436 /* 8437 * Add a range of bad blocks to the table. 8438 * This might extend the table, or might contract it 8439 * if two adjacent ranges can be merged. 8440 * We binary-search to find the 'insertion' point, then 8441 * decide how best to handle it. 
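*
* For illustration only (a walk-through with made-up numbers, not a
* normative description): if the table already holds [100,108) and
* [120,128) and we are asked to add [104,124), the new range is first
* merged into the earlier entry, giving [100,124), and the final
* "combine lo and hi" step then folds in [120,128), leaving the single
* entry [100,128). A new range longer than BB_MAX_LEN sectors cannot fit
* in one entry and is instead recorded as several maximal entries by the
* loop at the end of the function.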
8442 */ 8443 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 8444 int acknowledged) 8445 { 8446 u64 *p; 8447 int lo, hi; 8448 int rv = 1; 8449 unsigned long flags; 8450 8451 if (bb->shift < 0) 8452 /* badblocks are disabled */ 8453 return 0; 8454 8455 if (bb->shift) { 8456 /* round the start down, and the end up */ 8457 sector_t next = s + sectors; 8458 s >>= bb->shift; 8459 next += (1<<bb->shift) - 1; 8460 next >>= bb->shift; 8461 sectors = next - s; 8462 } 8463 8464 write_seqlock_irqsave(&bb->lock, flags); 8465 8466 p = bb->page; 8467 lo = 0; 8468 hi = bb->count; 8469 /* Find the last range that starts at-or-before 's' */ 8470 while (hi - lo > 1) { 8471 int mid = (lo + hi) / 2; 8472 sector_t a = BB_OFFSET(p[mid]); 8473 if (a <= s) 8474 lo = mid; 8475 else 8476 hi = mid; 8477 } 8478 if (hi > lo && BB_OFFSET(p[lo]) > s) 8479 hi = lo; 8480 8481 if (hi > lo) { 8482 /* we found a range that might merge with the start 8483 * of our new range 8484 */ 8485 sector_t a = BB_OFFSET(p[lo]); 8486 sector_t e = a + BB_LEN(p[lo]); 8487 int ack = BB_ACK(p[lo]); 8488 if (e >= s) { 8489 /* Yes, we can merge with a previous range */ 8490 if (s == a && s + sectors >= e) 8491 /* new range covers old */ 8492 ack = acknowledged; 8493 else 8494 ack = ack && acknowledged; 8495 8496 if (e < s + sectors) 8497 e = s + sectors; 8498 if (e - a <= BB_MAX_LEN) { 8499 p[lo] = BB_MAKE(a, e-a, ack); 8500 s = e; 8501 } else { 8502 /* does not all fit in one range, 8503 * make p[lo] maximal 8504 */ 8505 if (BB_LEN(p[lo]) != BB_MAX_LEN) 8506 p[lo] = BB_MAKE(a, BB_MAX_LEN, ack); 8507 s = a + BB_MAX_LEN; 8508 } 8509 sectors = e - s; 8510 } 8511 } 8512 if (sectors && hi < bb->count) { 8513 /* 'hi' points to the first range that starts after 's'. 8514 * Maybe we can merge with the start of that range */ 8515 sector_t a = BB_OFFSET(p[hi]); 8516 sector_t e = a + BB_LEN(p[hi]); 8517 int ack = BB_ACK(p[hi]); 8518 if (a <= s + sectors) { 8519 /* merging is possible */ 8520 if (e <= s + sectors) { 8521 /* full overlap */ 8522 e = s + sectors; 8523 ack = acknowledged; 8524 } else 8525 ack = ack && acknowledged; 8526 8527 a = s; 8528 if (e - a <= BB_MAX_LEN) { 8529 p[hi] = BB_MAKE(a, e-a, ack); 8530 s = e; 8531 } else { 8532 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack); 8533 s = a + BB_MAX_LEN; 8534 } 8535 sectors = e - s; 8536 lo = hi; 8537 hi++; 8538 } 8539 } 8540 if (sectors == 0 && hi < bb->count) { 8541 /* we might be able to combine lo and hi */ 8542 /* Note: 's' is at the end of 'lo' */ 8543 sector_t a = BB_OFFSET(p[hi]); 8544 int lolen = BB_LEN(p[lo]); 8545 int hilen = BB_LEN(p[hi]); 8546 int newlen = lolen + hilen - (s - a); 8547 if (s >= a && newlen < BB_MAX_LEN) { 8548 /* yes, we can combine them */ 8549 int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]); 8550 p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack); 8551 memmove(p + hi, p + hi + 1, 8552 (bb->count - hi - 1) * 8); 8553 bb->count--; 8554 } 8555 } 8556 while (sectors) { 8557 /* didn't merge (it all). 
8558 * Need to add a range just before 'hi' */
8559 if (bb->count >= MD_MAX_BADBLOCKS) {
8560 /* No room for more */
8561 rv = 0;
8562 break;
8563 } else {
8564 int this_sectors = sectors;
8565 memmove(p + hi + 1, p + hi,
8566 (bb->count - hi) * 8);
8567 bb->count++;
8568
8569 if (this_sectors > BB_MAX_LEN)
8570 this_sectors = BB_MAX_LEN;
8571 p[hi] = BB_MAKE(s, this_sectors, acknowledged);
8572 sectors -= this_sectors;
8573 s += this_sectors;
8574 }
8575 }
8576
8577 bb->changed = 1;
8578 if (!acknowledged)
8579 bb->unacked_exist = 1;
8580 write_sequnlock_irqrestore(&bb->lock, flags);
8581
8582 return rv;
8583 }
8584
8585 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8586 int is_new)
8587 {
8588 int rv;
8589 if (is_new)
8590 s += rdev->new_data_offset;
8591 else
8592 s += rdev->data_offset;
8593 rv = md_set_badblocks(&rdev->badblocks,
8594 s, sectors, 0);
8595 if (rv) {
8596 /* Make sure they get written out promptly */
8597 sysfs_notify_dirent_safe(rdev->sysfs_state);
8598 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
8599 md_wakeup_thread(rdev->mddev->thread);
8600 }
8601 return rv;
8602 }
8603 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
8604
8605 /*
8606 * Remove a range of bad blocks from the table.
8607 * This may involve extending the table if we split a region,
8608 * but it must not fail. So if the table becomes full, we just
8609 * drop the remove request.
8610 */
8611 static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
8612 {
8613 u64 *p;
8614 int lo, hi;
8615 sector_t target = s + sectors;
8616 int rv = 0;
8617
8618 if (bb->shift > 0) {
8619 /* When clearing we round the start up and the end down.
8620 * This should not matter as the shift should align with
8621 * the block size and no rounding should ever be needed.
8622 * However it is better to think a block is bad when it
8623 * isn't than to think a block is not bad when it is.
8624 */
8625 s += (1<<bb->shift) - 1;
8626 s >>= bb->shift;
8627 target >>= bb->shift;
8628 sectors = target - s;
8629 }
8630
8631 write_seqlock_irq(&bb->lock);
8632
8633 p = bb->page;
8634 lo = 0;
8635 hi = bb->count;
8636 /* Find the last range that starts before 'target' */
8637 while (hi - lo > 1) {
8638 int mid = (lo + hi) / 2;
8639 sector_t a = BB_OFFSET(p[mid]);
8640 if (a < target)
8641 lo = mid;
8642 else
8643 hi = mid;
8644 }
8645 if (hi > lo) {
8646 /* p[lo] is the last range that could overlap the
8647 * current range. Earlier ranges could also overlap,
8648 * but only this one can overlap the end of the range.
8649 */
8650 if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
8651 /* Partial overlap, leave the tail of this range */
8652 int ack = BB_ACK(p[lo]);
8653 sector_t a = BB_OFFSET(p[lo]);
8654 sector_t end = a + BB_LEN(p[lo]);
8655
8656 if (a < s) {
8657 /* we need to split this range */
8658 if (bb->count >= MD_MAX_BADBLOCKS) {
8659 rv = -ENOSPC;
8660 goto out;
8661 }
8662 memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
8663 bb->count++;
8664 p[lo] = BB_MAKE(a, s-a, ack);
8665 lo++;
8666 }
8667 p[lo] = BB_MAKE(target, end - target, ack);
8668 /* there is no longer an overlap */
8669 hi = lo;
8670 lo--;
8671 }
8672 while (lo >= 0 &&
8673 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
8674 /* This range does overlap */
8675 if (BB_OFFSET(p[lo]) < s) {
8676 /* Keep the early parts of this range. */
8677 int ack = BB_ACK(p[lo]);
8678 sector_t start = BB_OFFSET(p[lo]);
8679 p[lo] = BB_MAKE(start, s - start, ack);
8680 /* now 'lo' doesn't overlap, so..
*/ 8681 break; 8682 } 8683 lo--; 8684 } 8685 /* 'lo' is strictly before, 'hi' is strictly after, 8686 * anything between needs to be discarded 8687 */ 8688 if (hi - lo > 1) { 8689 memmove(p+lo+1, p+hi, (bb->count - hi) * 8); 8690 bb->count -= (hi - lo - 1); 8691 } 8692 } 8693 8694 bb->changed = 1; 8695 out: 8696 write_sequnlock_irq(&bb->lock); 8697 return rv; 8698 } 8699 8700 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 8701 int is_new) 8702 { 8703 if (is_new) 8704 s += rdev->new_data_offset; 8705 else 8706 s += rdev->data_offset; 8707 return md_clear_badblocks(&rdev->badblocks, 8708 s, sectors); 8709 } 8710 EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 8711 8712 /* 8713 * Acknowledge all bad blocks in a list. 8714 * This only succeeds if ->changed is clear. It is used by 8715 * in-kernel metadata updates 8716 */ 8717 void md_ack_all_badblocks(struct badblocks *bb) 8718 { 8719 if (bb->page == NULL || bb->changed) 8720 /* no point even trying */ 8721 return; 8722 write_seqlock_irq(&bb->lock); 8723 8724 if (bb->changed == 0 && bb->unacked_exist) { 8725 u64 *p = bb->page; 8726 int i; 8727 for (i = 0; i < bb->count ; i++) { 8728 if (!BB_ACK(p[i])) { 8729 sector_t start = BB_OFFSET(p[i]); 8730 int len = BB_LEN(p[i]); 8731 p[i] = BB_MAKE(start, len, 1); 8732 } 8733 } 8734 bb->unacked_exist = 0; 8735 } 8736 write_sequnlock_irq(&bb->lock); 8737 } 8738 EXPORT_SYMBOL_GPL(md_ack_all_badblocks); 8739 8740 /* sysfs access to bad-blocks list. 8741 * We present two files. 8742 * 'bad-blocks' lists sector numbers and lengths of ranges that 8743 * are recorded as bad. The list is truncated to fit within 8744 * the one-page limit of sysfs. 8745 * Writing "sector length" to this file adds an acknowledged 8746 * bad block list. 8747 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet 8748 * been acknowledged. Writing to this file adds bad blocks 8749 * without acknowledging them. This is largely for testing. 8750 */ 8751 8752 static ssize_t 8753 badblocks_show(struct badblocks *bb, char *page, int unack) 8754 { 8755 size_t len; 8756 int i; 8757 u64 *p = bb->page; 8758 unsigned seq; 8759 8760 if (bb->shift < 0) 8761 return 0; 8762 8763 retry: 8764 seq = read_seqbegin(&bb->lock); 8765 8766 len = 0; 8767 i = 0; 8768 8769 while (len < PAGE_SIZE && i < bb->count) { 8770 sector_t s = BB_OFFSET(p[i]); 8771 unsigned int length = BB_LEN(p[i]); 8772 int ack = BB_ACK(p[i]); 8773 i++; 8774 8775 if (unack && ack) 8776 continue; 8777 8778 len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n", 8779 (unsigned long long)s << bb->shift, 8780 length << bb->shift); 8781 } 8782 if (unack && len == 0) 8783 bb->unacked_exist = 0; 8784 8785 if (read_seqretry(&bb->lock, seq)) 8786 goto retry; 8787 8788 return len; 8789 } 8790 8791 #define DO_DEBUG 1 8792 8793 static ssize_t 8794 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack) 8795 { 8796 unsigned long long sector; 8797 int length; 8798 char newline; 8799 #ifdef DO_DEBUG 8800 /* Allow clearing via sysfs *only* for testing/debugging. 
8801 * Normally only a successful write may clear a badblock
8802 */
8803 int clear = 0;
8804 if (page[0] == '-') {
8805 clear = 1;
8806 page++;
8807 }
8808 #endif /* DO_DEBUG */
8809
8810 switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
8811 case 3:
8812 if (newline != '\n')
8813 return -EINVAL;
8814 case 2:
8815 if (length <= 0)
8816 return -EINVAL;
8817 break;
8818 default:
8819 return -EINVAL;
8820 }
8821
8822 #ifdef DO_DEBUG
8823 if (clear) {
8824 md_clear_badblocks(bb, sector, length);
8825 return len;
8826 }
8827 #endif /* DO_DEBUG */
8828 if (md_set_badblocks(bb, sector, length, !unack))
8829 return len;
8830 else
8831 return -ENOSPC;
8832 }
8833
8834 static int md_notify_reboot(struct notifier_block *this,
8835 unsigned long code, void *x)
8836 {
8837 struct list_head *tmp;
8838 struct mddev *mddev;
8839 int need_delay = 0;
8840
8841 for_each_mddev(mddev, tmp) {
8842 if (mddev_trylock(mddev)) {
8843 if (mddev->pers)
8844 __md_stop_writes(mddev);
8845 if (mddev->persistent)
8846 mddev->safemode = 2;
8847 mddev_unlock(mddev);
8848 }
8849 need_delay = 1;
8850 }
8851 /*
8852 * certain more exotic SCSI devices are known to be
8853 * volatile wrt too early system reboots. While the
8854 * right place to handle this issue is the given
8855 * driver, we do want to have a safe RAID driver ...
8856 */
8857 if (need_delay)
8858 mdelay(1000*1);
8859
8860 return NOTIFY_DONE;
8861 }
8862
8863 static struct notifier_block md_notifier = {
8864 .notifier_call = md_notify_reboot,
8865 .next = NULL,
8866 .priority = INT_MAX, /* before any real devices */
8867 };
8868
8869 static void md_geninit(void)
8870 {
8871 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8872
8873 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8874 }
8875
8876 static int __init md_init(void)
8877 {
8878 int ret = -ENOMEM;
8879
8880 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8881 if (!md_wq)
8882 goto err_wq;
8883
8884 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8885 if (!md_misc_wq)
8886 goto err_misc_wq;
8887
8888 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8889 goto err_md;
8890
8891 if ((ret = register_blkdev(0, "mdp")) < 0)
8892 goto err_mdp;
8893 mdp_major = ret;
8894
8895 blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8896 md_probe, NULL, NULL);
8897 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8898 md_probe, NULL, NULL);
8899
8900 register_reboot_notifier(&md_notifier);
8901 raid_table_header = register_sysctl_table(raid_root_table);
8902
8903 md_geninit();
8904 return 0;
8905
8906 err_mdp:
8907 unregister_blkdev(MD_MAJOR, "md");
8908 err_md:
8909 destroy_workqueue(md_misc_wq);
8910 err_misc_wq:
8911 destroy_workqueue(md_wq);
8912 err_wq:
8913 return ret;
8914 }
8915
8916 void md_reload_sb(struct mddev *mddev)
8917 {
8918 struct md_rdev *rdev, *tmp;
8919
8920 rdev_for_each_safe(rdev, tmp, mddev) {
8921 rdev->sb_loaded = 0;
8922 ClearPageUptodate(rdev->sb_page);
8923 }
8924 mddev->raid_disks = 0;
8925 analyze_sbs(mddev);
8926 rdev_for_each_safe(rdev, tmp, mddev) {
8927 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8928 /* since we don't write to faulty devices, we figure out if the
8929 * disk is faulty by comparing events
8930 */
8931 if (mddev->events > sb->events)
8932 set_bit(Faulty, &rdev->flags);
8933 }
8934
8935 }
8936 EXPORT_SYMBOL(md_reload_sb);
8937
8938 #ifndef MODULE
8939
8940 /*
8941 * Searches all registered partitions for autorun RAID arrays
8942 * at boot time.
8943 */ 8944 8945 static LIST_HEAD(all_detected_devices); 8946 struct detected_devices_node { 8947 struct list_head list; 8948 dev_t dev; 8949 }; 8950 8951 void md_autodetect_dev(dev_t dev) 8952 { 8953 struct detected_devices_node *node_detected_dev; 8954 8955 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 8956 if (node_detected_dev) { 8957 node_detected_dev->dev = dev; 8958 list_add_tail(&node_detected_dev->list, &all_detected_devices); 8959 } else { 8960 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" 8961 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); 8962 } 8963 } 8964 8965 static void autostart_arrays(int part) 8966 { 8967 struct md_rdev *rdev; 8968 struct detected_devices_node *node_detected_dev; 8969 dev_t dev; 8970 int i_scanned, i_passed; 8971 8972 i_scanned = 0; 8973 i_passed = 0; 8974 8975 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 8976 8977 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 8978 i_scanned++; 8979 node_detected_dev = list_entry(all_detected_devices.next, 8980 struct detected_devices_node, list); 8981 list_del(&node_detected_dev->list); 8982 dev = node_detected_dev->dev; 8983 kfree(node_detected_dev); 8984 rdev = md_import_device(dev,0, 90); 8985 if (IS_ERR(rdev)) 8986 continue; 8987 8988 if (test_bit(Faulty, &rdev->flags)) 8989 continue; 8990 8991 set_bit(AutoDetected, &rdev->flags); 8992 list_add(&rdev->same_set, &pending_raid_disks); 8993 i_passed++; 8994 } 8995 8996 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", 8997 i_scanned, i_passed); 8998 8999 autorun_devices(part); 9000 } 9001 9002 #endif /* !MODULE */ 9003 9004 static __exit void md_exit(void) 9005 { 9006 struct mddev *mddev; 9007 struct list_head *tmp; 9008 int delay = 1; 9009 9010 blk_unregister_region(MKDEV(MD_MAJOR,0), 512); 9011 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 9012 9013 unregister_blkdev(MD_MAJOR,"md"); 9014 unregister_blkdev(mdp_major, "mdp"); 9015 unregister_reboot_notifier(&md_notifier); 9016 unregister_sysctl_table(raid_table_header); 9017 9018 /* We cannot unload the modules while some process is 9019 * waiting for us in select() or poll() - wake them up 9020 */ 9021 md_unloading = 1; 9022 while (waitqueue_active(&md_event_waiters)) { 9023 /* not safe to leave yet */ 9024 wake_up(&md_event_waiters); 9025 msleep(delay); 9026 delay += delay; 9027 } 9028 remove_proc_entry("mdstat", NULL); 9029 9030 for_each_mddev(mddev, tmp) { 9031 export_array(mddev); 9032 mddev->hold_active = 0; 9033 } 9034 destroy_workqueue(md_misc_wq); 9035 destroy_workqueue(md_wq); 9036 } 9037 9038 subsys_initcall(md_init); 9039 module_exit(md_exit) 9040 9041 static int get_ro(char *buffer, struct kernel_param *kp) 9042 { 9043 return sprintf(buffer, "%d", start_readonly); 9044 } 9045 static int set_ro(const char *val, struct kernel_param *kp) 9046 { 9047 return kstrtouint(val, 10, (unsigned int *)&start_readonly); 9048 } 9049 9050 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 9051 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 9052 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 9053 9054 MODULE_LICENSE("GPL"); 9055 MODULE_DESCRIPTION("MD RAID framework"); 9056 MODULE_ALIAS("md"); 9057 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 9058
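
/*
 * Illustrative userspace sketch (not part of md.c): the processes "waiting
 * for us in select() or poll()" that md_exit() wakes up are typically
 * monitors sitting on the /proc/mdstat file created in md_geninit(). A
 * minimal example is shown below; the POLLPRI usage and the
 * reopen-per-iteration pattern are assumptions made for this sketch, not a
 * statement of the interface contract.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		for (;;) {
 *			char buf[4096];
 *			int fd = open("/proc/mdstat", O_RDONLY);
 *			struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *			if (fd < 0)
 *				return 1;
 *			read(fd, buf, sizeof(buf));	// consume current array state
 *			poll(&pfd, 1, -1);		// sleep until the event count changes
 *			printf("md: event count changed\n");
 *			close(fd);
 *		}
 *	}
 */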