// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

enum md_ro_state {
	MD_RDWR,
	MD_RDONLY,
	MD_AUTO_READ,
	MD_MAX_STATE
};

static bool md_is_rdwr(struct mddev *mddev)
{
	return (mddev->ro == MD_RDWR);
}

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
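 *
 * Worked example (illustrative): a device with 16 recorded read errors
 * whose last error was three hours ago is decayed to 16 >> 3 = 2 errors
 * before the new one is counted, so it stays well below the default
 * limit of 20 defined below.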
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * rdev needs to enable serialization if it meets both conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resource for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
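 *
 * For example (illustrative): marking one member of a write-behind array
 * as writemostly typically ends up here with that single rdev, while
 * enabling write-behind itself passes rdev == NULL so that every member
 * gets its serial tree initialised.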
222 */ 223 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 224 bool is_suspend) 225 { 226 int ret = 0; 227 228 if (rdev && !rdev_need_serial(rdev) && 229 !test_bit(CollisionCheck, &rdev->flags)) 230 return; 231 232 if (!is_suspend) 233 mddev_suspend(mddev); 234 235 if (!rdev) 236 ret = rdevs_init_serial(mddev); 237 else 238 ret = rdev_init_serial(rdev); 239 if (ret) 240 goto abort; 241 242 if (mddev->serial_info_pool == NULL) { 243 /* 244 * already in memalloc noio context by 245 * mddev_suspend() 246 */ 247 mddev->serial_info_pool = 248 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 249 sizeof(struct serial_info)); 250 if (!mddev->serial_info_pool) { 251 rdevs_uninit_serial(mddev); 252 pr_err("can't alloc memory pool for serialization\n"); 253 } 254 } 255 256 abort: 257 if (!is_suspend) 258 mddev_resume(mddev); 259 } 260 261 /* 262 * Free resource from rdev(s), and destroy serial_info_pool under conditions: 263 * 1. rdev is the last device flaged with CollisionCheck. 264 * 2. when bitmap is destroyed while policy is not enabled. 265 * 3. for disable policy, the pool is destroyed only when no rdev needs it. 266 */ 267 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 268 bool is_suspend) 269 { 270 if (rdev && !test_bit(CollisionCheck, &rdev->flags)) 271 return; 272 273 if (mddev->serial_info_pool) { 274 struct md_rdev *temp; 275 int num = 0; /* used to track if other rdevs need the pool */ 276 277 if (!is_suspend) 278 mddev_suspend(mddev); 279 rdev_for_each(temp, mddev) { 280 if (!rdev) { 281 if (!mddev->serialize_policy || 282 !rdev_need_serial(temp)) 283 rdev_uninit_serial(temp); 284 else 285 num++; 286 } else if (temp != rdev && 287 test_bit(CollisionCheck, &temp->flags)) 288 num++; 289 } 290 291 if (rdev) 292 rdev_uninit_serial(rdev); 293 294 if (num) 295 pr_info("The mempool could be used by other devices\n"); 296 else { 297 mempool_destroy(mddev->serial_info_pool); 298 mddev->serial_info_pool = NULL; 299 } 300 if (!is_suspend) 301 mddev_resume(mddev); 302 } 303 } 304 305 static struct ctl_table_header *raid_table_header; 306 307 static struct ctl_table raid_table[] = { 308 { 309 .procname = "speed_limit_min", 310 .data = &sysctl_speed_limit_min, 311 .maxlen = sizeof(int), 312 .mode = S_IRUGO|S_IWUSR, 313 .proc_handler = proc_dointvec, 314 }, 315 { 316 .procname = "speed_limit_max", 317 .data = &sysctl_speed_limit_max, 318 .maxlen = sizeof(int), 319 .mode = S_IRUGO|S_IWUSR, 320 .proc_handler = proc_dointvec, 321 }, 322 { } 323 }; 324 325 static struct ctl_table raid_dir_table[] = { 326 { 327 .procname = "raid", 328 .maxlen = 0, 329 .mode = S_IRUGO|S_IXUGO, 330 .child = raid_table, 331 }, 332 { } 333 }; 334 335 static struct ctl_table raid_root_table[] = { 336 { 337 .procname = "dev", 338 .maxlen = 0, 339 .mode = 0555, 340 .child = raid_dir_table, 341 }, 342 { } 343 }; 344 345 static int start_readonly; 346 347 /* 348 * The original mechanism for creating an md device is to create 349 * a device node in /dev and to open it. This causes races with device-close. 350 * The preferred method is to write to the "new_array" module parameter. 351 * This can avoid races. 352 * Setting create_on_open to false disables the original mechanism 353 * so all the races disappear. 354 */ 355 static bool create_on_open = true; 356 357 /* 358 * We have a system wide 'event count' that is incremented 359 * on any 'interesting' event, and readers of /proc/mdstat 360 * can use 'poll' or 'select' to find out when the event 361 * count increases. 
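 *
 * A minimal, illustrative userspace consumer (error handling omitted):
 * read /proc/mdstat once to arm the counter, poll for POLLPRI, then
 * seek back and re-read:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
 *
 *	read(fd, buf, sizeof(buf));
 *	poll(&pfd, 1, -1);
 *	lseek(fd, 0, SEEK_SET);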
362 * 363 * Events are: 364 * start array, stop array, error, add device, remove device, 365 * start build, activate spare 366 */ 367 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 368 static atomic_t md_event_count; 369 void md_new_event(void) 370 { 371 atomic_inc(&md_event_count); 372 wake_up(&md_event_waiters); 373 } 374 EXPORT_SYMBOL_GPL(md_new_event); 375 376 /* 377 * Enables to iterate over all existing md arrays 378 * all_mddevs_lock protects this list. 379 */ 380 static LIST_HEAD(all_mddevs); 381 static DEFINE_SPINLOCK(all_mddevs_lock); 382 383 /* Rather than calling directly into the personality make_request function, 384 * IO requests come here first so that we can check if the device is 385 * being suspended pending a reconfiguration. 386 * We hold a refcount over the call to ->make_request. By the time that 387 * call has finished, the bio has been linked into some internal structure 388 * and so is visible to ->quiesce(), so we don't need the refcount any more. 389 */ 390 static bool is_suspended(struct mddev *mddev, struct bio *bio) 391 { 392 if (mddev->suspended) 393 return true; 394 if (bio_data_dir(bio) != WRITE) 395 return false; 396 if (mddev->suspend_lo >= mddev->suspend_hi) 397 return false; 398 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) 399 return false; 400 if (bio_end_sector(bio) < mddev->suspend_lo) 401 return false; 402 return true; 403 } 404 405 void md_handle_request(struct mddev *mddev, struct bio *bio) 406 { 407 check_suspended: 408 rcu_read_lock(); 409 if (is_suspended(mddev, bio)) { 410 DEFINE_WAIT(__wait); 411 /* Bail out if REQ_NOWAIT is set for the bio */ 412 if (bio->bi_opf & REQ_NOWAIT) { 413 rcu_read_unlock(); 414 bio_wouldblock_error(bio); 415 return; 416 } 417 for (;;) { 418 prepare_to_wait(&mddev->sb_wait, &__wait, 419 TASK_UNINTERRUPTIBLE); 420 if (!is_suspended(mddev, bio)) 421 break; 422 rcu_read_unlock(); 423 schedule(); 424 rcu_read_lock(); 425 } 426 finish_wait(&mddev->sb_wait, &__wait); 427 } 428 atomic_inc(&mddev->active_io); 429 rcu_read_unlock(); 430 431 if (!mddev->pers->make_request(mddev, bio)) { 432 atomic_dec(&mddev->active_io); 433 wake_up(&mddev->sb_wait); 434 goto check_suspended; 435 } 436 437 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 438 wake_up(&mddev->sb_wait); 439 } 440 EXPORT_SYMBOL(md_handle_request); 441 442 static void md_submit_bio(struct bio *bio) 443 { 444 const int rw = bio_data_dir(bio); 445 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; 446 447 if (mddev == NULL || mddev->pers == NULL) { 448 bio_io_error(bio); 449 return; 450 } 451 452 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { 453 bio_io_error(bio); 454 return; 455 } 456 457 bio = bio_split_to_limits(bio); 458 459 if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) { 460 if (bio_sectors(bio) != 0) 461 bio->bi_status = BLK_STS_IOERR; 462 bio_endio(bio); 463 return; 464 } 465 466 /* bio could be mergeable after passing to underlayer */ 467 bio->bi_opf &= ~REQ_NOMERGE; 468 469 md_handle_request(mddev, bio); 470 } 471 472 /* mddev_suspend makes sure no new requests are submitted 473 * to the device, and that any requests that have been submitted 474 * are completely handled. 475 * Once mddev_detach() is called and completes, the module will be 476 * completely unused. 
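 *
 * Note that ->suspended acts as a nesting counter: mddev_suspend() and
 * mddev_resume() calls may nest under reconfig_mutex, and only the
 * outermost pair actually quiesces and restarts the personality.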
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	/* entered the memalloc scope from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	bio_put(bio);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_bioset(rdev->bdev, 0,
					      REQ_OP_WRITE | REQ_PREFLUSH,
					      GFP_NOIO, &mddev->bio_set);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to
	 * avoid a deadlock: other bios that already passed the
	 * md_handle_request suspend check could be waiting for flush_bio
	 * to clear, while the md_handle_request call below could in turn
	 * wait for those bios because of the suspend check.
	 */
	spin_lock_irq(&mddev->lock);
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	spin_unlock_irq(&mddev->lock);
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an
empty barrier - all done */ 590 bio_endio(bio); 591 } else { 592 bio->bi_opf &= ~REQ_PREFLUSH; 593 md_handle_request(mddev, bio); 594 } 595 } 596 597 /* 598 * Manages consolidation of flushes and submitting any flushes needed for 599 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is 600 * being finished in another context. Returns false if the flushing is 601 * complete but still needs the I/O portion of the bio to be processed. 602 */ 603 bool md_flush_request(struct mddev *mddev, struct bio *bio) 604 { 605 ktime_t req_start = ktime_get_boottime(); 606 spin_lock_irq(&mddev->lock); 607 /* flush requests wait until ongoing flush completes, 608 * hence coalescing all the pending requests. 609 */ 610 wait_event_lock_irq(mddev->sb_wait, 611 !mddev->flush_bio || 612 ktime_before(req_start, mddev->prev_flush_start), 613 mddev->lock); 614 /* new request after previous flush is completed */ 615 if (ktime_after(req_start, mddev->prev_flush_start)) { 616 WARN_ON(mddev->flush_bio); 617 mddev->flush_bio = bio; 618 bio = NULL; 619 } 620 spin_unlock_irq(&mddev->lock); 621 622 if (!bio) { 623 INIT_WORK(&mddev->flush_work, submit_flushes); 624 queue_work(md_wq, &mddev->flush_work); 625 } else { 626 /* flush was performed for some other bio while we waited. */ 627 if (bio->bi_iter.bi_size == 0) 628 /* an empty barrier - all done */ 629 bio_endio(bio); 630 else { 631 bio->bi_opf &= ~REQ_PREFLUSH; 632 return false; 633 } 634 } 635 return true; 636 } 637 EXPORT_SYMBOL(md_flush_request); 638 639 static inline struct mddev *mddev_get(struct mddev *mddev) 640 { 641 lockdep_assert_held(&all_mddevs_lock); 642 643 if (test_bit(MD_DELETED, &mddev->flags)) 644 return NULL; 645 atomic_inc(&mddev->active); 646 return mddev; 647 } 648 649 static void mddev_delayed_delete(struct work_struct *ws); 650 651 void mddev_put(struct mddev *mddev) 652 { 653 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 654 return; 655 if (!mddev->raid_disks && list_empty(&mddev->disks) && 656 mddev->ctime == 0 && !mddev->hold_active) { 657 /* Array is not configured at all, and not held active, 658 * so destroy it */ 659 set_bit(MD_DELETED, &mddev->flags); 660 661 /* 662 * Call queue_work inside the spinlock so that 663 * flush_workqueue() after mddev_find will succeed in waiting 664 * for the work to be done. 
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find_locked(dev_t unit)
{
	struct mddev *mddev;

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit)
			return mddev;

	return NULL;
}

/* find an unused unit number */
static dev_t mddev_alloc_unit(void)
{
	static int next_minor = 512;
	int start = next_minor;
	bool is_free = 0;
	dev_t dev = 0;

	while (!is_free) {
		dev = MKDEV(MD_MAJOR, next_minor);
		next_minor++;
		if (next_minor > MINORMASK)
			next_minor = 0;
		if (next_minor == start)
			return 0;		/* Oh dear, all in use. */
		is_free = !mddev_find_locked(dev);
	}

	return dev;
}

static struct mddev *mddev_alloc(dev_t unit)
{
	struct mddev *new;
	int error;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1 << MdpMinorShift) - 1);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);
	mddev_init(new);

	spin_lock(&all_mddevs_lock);
	if (unit) {
		error = -EEXIST;
		if (mddev_find_locked(unit))
			goto out_free_new;
		new->unit = unit;
		if (MAJOR(unit) == MD_MAJOR)
			new->md_minor = MINOR(unit);
		else
			new->md_minor = MINOR(unit) >> MdpMinorShift;
		new->hold_active = UNTIL_IOCTL;
	} else {
		error = -ENODEV;
		new->unit = mddev_alloc_unit();
		if (!new->unit)
			goto out_free_new;
		new->md_minor = MINOR(new->unit);
		new->hold_active = UNTIL_STOP;
	}

	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);
	return new;
out_free_new:
	spin_unlock(&all_mddevs_lock);
	kfree(new);
	return ERR_PTR(error);
}

static void mddev_free(struct mddev *mddev)
{
	spin_lock(&all_mddevs_lock);
	list_del(&mddev->all_mddevs);
	spin_unlock(&all_mddevs_lock);

	kfree(mddev);
}

static const struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So keep sysfs_active set while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
794 * We set sysfs_active under reconfig_mutex and elsewhere 795 * test it under the same mutex to ensure its correct value 796 * is seen. 797 */ 798 const struct attribute_group *to_remove = mddev->to_remove; 799 mddev->to_remove = NULL; 800 mddev->sysfs_active = 1; 801 mutex_unlock(&mddev->reconfig_mutex); 802 803 if (mddev->kobj.sd) { 804 if (to_remove != &md_redundancy_group) 805 sysfs_remove_group(&mddev->kobj, to_remove); 806 if (mddev->pers == NULL || 807 mddev->pers->sync_request == NULL) { 808 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 809 if (mddev->sysfs_action) 810 sysfs_put(mddev->sysfs_action); 811 if (mddev->sysfs_completed) 812 sysfs_put(mddev->sysfs_completed); 813 if (mddev->sysfs_degraded) 814 sysfs_put(mddev->sysfs_degraded); 815 mddev->sysfs_action = NULL; 816 mddev->sysfs_completed = NULL; 817 mddev->sysfs_degraded = NULL; 818 } 819 } 820 mddev->sysfs_active = 0; 821 } else 822 mutex_unlock(&mddev->reconfig_mutex); 823 824 /* As we've dropped the mutex we need a spinlock to 825 * make sure the thread doesn't disappear 826 */ 827 spin_lock(&pers_lock); 828 md_wakeup_thread(mddev->thread); 829 wake_up(&mddev->sb_wait); 830 spin_unlock(&pers_lock); 831 } 832 EXPORT_SYMBOL_GPL(mddev_unlock); 833 834 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) 835 { 836 struct md_rdev *rdev; 837 838 rdev_for_each_rcu(rdev, mddev) 839 if (rdev->desc_nr == nr) 840 return rdev; 841 842 return NULL; 843 } 844 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu); 845 846 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) 847 { 848 struct md_rdev *rdev; 849 850 rdev_for_each(rdev, mddev) 851 if (rdev->bdev->bd_dev == dev) 852 return rdev; 853 854 return NULL; 855 } 856 857 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) 858 { 859 struct md_rdev *rdev; 860 861 rdev_for_each_rcu(rdev, mddev) 862 if (rdev->bdev->bd_dev == dev) 863 return rdev; 864 865 return NULL; 866 } 867 EXPORT_SYMBOL_GPL(md_find_rdev_rcu); 868 869 static struct md_personality *find_pers(int level, char *clevel) 870 { 871 struct md_personality *pers; 872 list_for_each_entry(pers, &pers_list, list) { 873 if (level != LEVEL_NONE && pers->level == level) 874 return pers; 875 if (strcmp(pers->name, clevel)==0) 876 return pers; 877 } 878 return NULL; 879 } 880 881 /* return the offset of the super block in 512byte sectors */ 882 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) 883 { 884 return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev)); 885 } 886 887 static int alloc_disk_sb(struct md_rdev *rdev) 888 { 889 rdev->sb_page = alloc_page(GFP_KERNEL); 890 if (!rdev->sb_page) 891 return -ENOMEM; 892 return 0; 893 } 894 895 void md_rdev_clear(struct md_rdev *rdev) 896 { 897 if (rdev->sb_page) { 898 put_page(rdev->sb_page); 899 rdev->sb_loaded = 0; 900 rdev->sb_page = NULL; 901 rdev->sb_start = 0; 902 rdev->sectors = 0; 903 } 904 if (rdev->bb_page) { 905 put_page(rdev->bb_page); 906 rdev->bb_page = NULL; 907 } 908 badblocks_exit(&rdev->badblocks); 909 } 910 EXPORT_SYMBOL_GPL(md_rdev_clear); 911 912 static void super_written(struct bio *bio) 913 { 914 struct md_rdev *rdev = bio->bi_private; 915 struct mddev *mddev = rdev->mddev; 916 917 if (bio->bi_status) { 918 pr_err("md: %s gets error=%d\n", __func__, 919 blk_status_to_errno(bio->bi_status)); 920 md_error(mddev, rdev); 921 if (!test_bit(Faulty, &rdev->flags) 922 && (bio->bi_opf & MD_FAILFAST)) { 923 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); 924 set_bit(LastDev, &rdev->flags); 925 } 926 } else 927 clear_bit(LastDev, 
&rdev->flags); 928 929 bio_put(bio); 930 931 rdev_dec_pending(rdev, mddev); 932 933 if (atomic_dec_and_test(&mddev->pending_writes)) 934 wake_up(&mddev->sb_wait); 935 } 936 937 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, 938 sector_t sector, int size, struct page *page) 939 { 940 /* write first size bytes of page to sector of rdev 941 * Increment mddev->pending_writes before returning 942 * and decrement it on completion, waking up sb_wait 943 * if zero is reached. 944 * If an error occurred, call md_error 945 */ 946 struct bio *bio; 947 948 if (!page) 949 return; 950 951 if (test_bit(Faulty, &rdev->flags)) 952 return; 953 954 bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev, 955 1, 956 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA, 957 GFP_NOIO, &mddev->sync_set); 958 959 atomic_inc(&rdev->nr_pending); 960 961 bio->bi_iter.bi_sector = sector; 962 bio_add_page(bio, page, size, 0); 963 bio->bi_private = rdev; 964 bio->bi_end_io = super_written; 965 966 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && 967 test_bit(FailFast, &rdev->flags) && 968 !test_bit(LastDev, &rdev->flags)) 969 bio->bi_opf |= MD_FAILFAST; 970 971 atomic_inc(&mddev->pending_writes); 972 submit_bio(bio); 973 } 974 975 int md_super_wait(struct mddev *mddev) 976 { 977 /* wait for all superblock writes that were scheduled to complete */ 978 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); 979 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) 980 return -EAGAIN; 981 return 0; 982 } 983 984 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 985 struct page *page, blk_opf_t opf, bool metadata_op) 986 { 987 struct bio bio; 988 struct bio_vec bvec; 989 990 if (metadata_op && rdev->meta_bdev) 991 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); 992 else 993 bio_init(&bio, rdev->bdev, &bvec, 1, opf); 994 995 if (metadata_op) 996 bio.bi_iter.bi_sector = sector + rdev->sb_start; 997 else if (rdev->mddev->reshape_position != MaxSector && 998 (rdev->mddev->reshape_backwards == 999 (sector >= rdev->mddev->reshape_position))) 1000 bio.bi_iter.bi_sector = sector + rdev->new_data_offset; 1001 else 1002 bio.bi_iter.bi_sector = sector + rdev->data_offset; 1003 bio_add_page(&bio, page, size, 0); 1004 1005 submit_bio_wait(&bio); 1006 1007 return !bio.bi_status; 1008 } 1009 EXPORT_SYMBOL_GPL(sync_page_io); 1010 1011 static int read_disk_sb(struct md_rdev *rdev, int size) 1012 { 1013 if (rdev->sb_loaded) 1014 return 0; 1015 1016 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) 1017 goto fail; 1018 rdev->sb_loaded = 1; 1019 return 0; 1020 1021 fail: 1022 pr_err("md: disabled device %pg, could not read superblock.\n", 1023 rdev->bdev); 1024 return -EINVAL; 1025 } 1026 1027 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1028 { 1029 return sb1->set_uuid0 == sb2->set_uuid0 && 1030 sb1->set_uuid1 == sb2->set_uuid1 && 1031 sb1->set_uuid2 == sb2->set_uuid2 && 1032 sb1->set_uuid3 == sb2->set_uuid3; 1033 } 1034 1035 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1036 { 1037 int ret; 1038 mdp_super_t *tmp1, *tmp2; 1039 1040 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 1041 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 1042 1043 if (!tmp1 || !tmp2) { 1044 ret = 0; 1045 goto abort; 1046 } 1047 1048 *tmp1 = *sb1; 1049 *tmp2 = *sb2; 1050 1051 /* 1052 * nr_disks is not constant 1053 */ 1054 tmp1->nr_disks = 0; 1055 tmp2->nr_disks = 0; 1056 1057 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); 1058 abort: 1059 
kfree(tmp1); 1060 kfree(tmp2); 1061 return ret; 1062 } 1063 1064 static u32 md_csum_fold(u32 csum) 1065 { 1066 csum = (csum & 0xffff) + (csum >> 16); 1067 return (csum & 0xffff) + (csum >> 16); 1068 } 1069 1070 static unsigned int calc_sb_csum(mdp_super_t *sb) 1071 { 1072 u64 newcsum = 0; 1073 u32 *sb32 = (u32*)sb; 1074 int i; 1075 unsigned int disk_csum, csum; 1076 1077 disk_csum = sb->sb_csum; 1078 sb->sb_csum = 0; 1079 1080 for (i = 0; i < MD_SB_BYTES/4 ; i++) 1081 newcsum += sb32[i]; 1082 csum = (newcsum & 0xffffffff) + (newcsum>>32); 1083 1084 #ifdef CONFIG_ALPHA 1085 /* This used to use csum_partial, which was wrong for several 1086 * reasons including that different results are returned on 1087 * different architectures. It isn't critical that we get exactly 1088 * the same return value as before (we always csum_fold before 1089 * testing, and that removes any differences). However as we 1090 * know that csum_partial always returned a 16bit value on 1091 * alphas, do a fold to maximise conformity to previous behaviour. 1092 */ 1093 sb->sb_csum = md_csum_fold(disk_csum); 1094 #else 1095 sb->sb_csum = disk_csum; 1096 #endif 1097 return csum; 1098 } 1099 1100 /* 1101 * Handle superblock details. 1102 * We want to be able to handle multiple superblock formats 1103 * so we have a common interface to them all, and an array of 1104 * different handlers. 1105 * We rely on user-space to write the initial superblock, and support 1106 * reading and updating of superblocks. 1107 * Interface methods are: 1108 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) 1109 * loads and validates a superblock on dev. 1110 * if refdev != NULL, compare superblocks on both devices 1111 * Return: 1112 * 0 - dev has a superblock that is compatible with refdev 1113 * 1 - dev has a superblock that is compatible and newer than refdev 1114 * so dev should be used as the refdev in future 1115 * -EINVAL superblock incompatible or invalid 1116 * -othererror e.g. -EIO 1117 * 1118 * int validate_super(struct mddev *mddev, struct md_rdev *dev) 1119 * Verify that dev is acceptable into mddev. 1120 * The first time, mddev->raid_disks will be 0, and data from 1121 * dev should be merged in. Subsequent calls check that dev 1122 * is new enough. Return 0 or -EINVAL 1123 * 1124 * void sync_super(struct mddev *mddev, struct md_rdev *dev) 1125 * Update the superblock for rdev with data in mddev 1126 * This does not write to disc. 1127 * 1128 */ 1129 1130 struct super_type { 1131 char *name; 1132 struct module *owner; 1133 int (*load_super)(struct md_rdev *rdev, 1134 struct md_rdev *refdev, 1135 int minor_version); 1136 int (*validate_super)(struct mddev *mddev, 1137 struct md_rdev *rdev); 1138 void (*sync_super)(struct mddev *mddev, 1139 struct md_rdev *rdev); 1140 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1141 sector_t num_sectors); 1142 int (*allow_new_offset)(struct md_rdev *rdev, 1143 unsigned long long new_offset); 1144 }; 1145 1146 /* 1147 * Check that the given mddev has no bitmap. 1148 * 1149 * This function is called from the run method of all personalities that do not 1150 * support bitmaps. It prints an error message and returns non-zero if mddev 1151 * has a bitmap. Otherwise, it returns 0. 
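 *
 * Typical use in such a personality's ->run() method (illustrative):
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;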
1152 * 1153 */ 1154 int md_check_no_bitmap(struct mddev *mddev) 1155 { 1156 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) 1157 return 0; 1158 pr_warn("%s: bitmaps are not supported for %s\n", 1159 mdname(mddev), mddev->pers->name); 1160 return 1; 1161 } 1162 EXPORT_SYMBOL(md_check_no_bitmap); 1163 1164 /* 1165 * load_super for 0.90.0 1166 */ 1167 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1168 { 1169 mdp_super_t *sb; 1170 int ret; 1171 bool spare_disk = true; 1172 1173 /* 1174 * Calculate the position of the superblock (512byte sectors), 1175 * it's at the end of the disk. 1176 * 1177 * It also happens to be a multiple of 4Kb. 1178 */ 1179 rdev->sb_start = calc_dev_sboffset(rdev); 1180 1181 ret = read_disk_sb(rdev, MD_SB_BYTES); 1182 if (ret) 1183 return ret; 1184 1185 ret = -EINVAL; 1186 1187 sb = page_address(rdev->sb_page); 1188 1189 if (sb->md_magic != MD_SB_MAGIC) { 1190 pr_warn("md: invalid raid superblock magic on %pg\n", 1191 rdev->bdev); 1192 goto abort; 1193 } 1194 1195 if (sb->major_version != 0 || 1196 sb->minor_version < 90 || 1197 sb->minor_version > 91) { 1198 pr_warn("Bad version number %d.%d on %pg\n", 1199 sb->major_version, sb->minor_version, rdev->bdev); 1200 goto abort; 1201 } 1202 1203 if (sb->raid_disks <= 0) 1204 goto abort; 1205 1206 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 1207 pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev); 1208 goto abort; 1209 } 1210 1211 rdev->preferred_minor = sb->md_minor; 1212 rdev->data_offset = 0; 1213 rdev->new_data_offset = 0; 1214 rdev->sb_size = MD_SB_BYTES; 1215 rdev->badblocks.shift = -1; 1216 1217 if (sb->level == LEVEL_MULTIPATH) 1218 rdev->desc_nr = -1; 1219 else 1220 rdev->desc_nr = sb->this_disk.number; 1221 1222 /* not spare disk, or LEVEL_MULTIPATH */ 1223 if (sb->level == LEVEL_MULTIPATH || 1224 (rdev->desc_nr >= 0 && 1225 rdev->desc_nr < MD_SB_DISKS && 1226 sb->disks[rdev->desc_nr].state & 1227 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))) 1228 spare_disk = false; 1229 1230 if (!refdev) { 1231 if (!spare_disk) 1232 ret = 1; 1233 else 1234 ret = 0; 1235 } else { 1236 __u64 ev1, ev2; 1237 mdp_super_t *refsb = page_address(refdev->sb_page); 1238 if (!md_uuid_equal(refsb, sb)) { 1239 pr_warn("md: %pg has different UUID to %pg\n", 1240 rdev->bdev, refdev->bdev); 1241 goto abort; 1242 } 1243 if (!md_sb_equal(refsb, sb)) { 1244 pr_warn("md: %pg has same UUID but different superblock to %pg\n", 1245 rdev->bdev, refdev->bdev); 1246 goto abort; 1247 } 1248 ev1 = md_event(sb); 1249 ev2 = md_event(refsb); 1250 1251 if (!spare_disk && ev1 > ev2) 1252 ret = 1; 1253 else 1254 ret = 0; 1255 } 1256 rdev->sectors = rdev->sb_start; 1257 /* Limit to 4TB as metadata cannot record more than that. 1258 * (not needed for Linear and RAID0 as metadata doesn't 1259 * record this size) 1260 */ 1261 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) 1262 rdev->sectors = (sector_t)(2ULL << 32) - 2; 1263 1264 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1265 /* "this cannot possibly happen" ... 
*/ 1266 ret = -EINVAL; 1267 1268 abort: 1269 return ret; 1270 } 1271 1272 /* 1273 * validate_super for 0.90.0 1274 */ 1275 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) 1276 { 1277 mdp_disk_t *desc; 1278 mdp_super_t *sb = page_address(rdev->sb_page); 1279 __u64 ev1 = md_event(sb); 1280 1281 rdev->raid_disk = -1; 1282 clear_bit(Faulty, &rdev->flags); 1283 clear_bit(In_sync, &rdev->flags); 1284 clear_bit(Bitmap_sync, &rdev->flags); 1285 clear_bit(WriteMostly, &rdev->flags); 1286 1287 if (mddev->raid_disks == 0) { 1288 mddev->major_version = 0; 1289 mddev->minor_version = sb->minor_version; 1290 mddev->patch_version = sb->patch_version; 1291 mddev->external = 0; 1292 mddev->chunk_sectors = sb->chunk_size >> 9; 1293 mddev->ctime = sb->ctime; 1294 mddev->utime = sb->utime; 1295 mddev->level = sb->level; 1296 mddev->clevel[0] = 0; 1297 mddev->layout = sb->layout; 1298 mddev->raid_disks = sb->raid_disks; 1299 mddev->dev_sectors = ((sector_t)sb->size) * 2; 1300 mddev->events = ev1; 1301 mddev->bitmap_info.offset = 0; 1302 mddev->bitmap_info.space = 0; 1303 /* bitmap can use 60 K after the 4K superblocks */ 1304 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1305 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 1306 mddev->reshape_backwards = 0; 1307 1308 if (mddev->minor_version >= 91) { 1309 mddev->reshape_position = sb->reshape_position; 1310 mddev->delta_disks = sb->delta_disks; 1311 mddev->new_level = sb->new_level; 1312 mddev->new_layout = sb->new_layout; 1313 mddev->new_chunk_sectors = sb->new_chunk >> 9; 1314 if (mddev->delta_disks < 0) 1315 mddev->reshape_backwards = 1; 1316 } else { 1317 mddev->reshape_position = MaxSector; 1318 mddev->delta_disks = 0; 1319 mddev->new_level = mddev->level; 1320 mddev->new_layout = mddev->layout; 1321 mddev->new_chunk_sectors = mddev->chunk_sectors; 1322 } 1323 if (mddev->level == 0) 1324 mddev->layout = -1; 1325 1326 if (sb->state & (1<<MD_SB_CLEAN)) 1327 mddev->recovery_cp = MaxSector; 1328 else { 1329 if (sb->events_hi == sb->cp_events_hi && 1330 sb->events_lo == sb->cp_events_lo) { 1331 mddev->recovery_cp = sb->recovery_cp; 1332 } else 1333 mddev->recovery_cp = 0; 1334 } 1335 1336 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 1337 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 1338 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 1339 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 1340 1341 mddev->max_disks = MD_SB_DISKS; 1342 1343 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 1344 mddev->bitmap_info.file == NULL) { 1345 mddev->bitmap_info.offset = 1346 mddev->bitmap_info.default_offset; 1347 mddev->bitmap_info.space = 1348 mddev->bitmap_info.default_space; 1349 } 1350 1351 } else if (mddev->pers == NULL) { 1352 /* Insist on good event counter while assembling, except 1353 * for spares (which don't need an event count) */ 1354 ++ev1; 1355 if (sb->disks[rdev->desc_nr].state & ( 1356 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) 1357 if (ev1 < mddev->events) 1358 return -EINVAL; 1359 } else if (mddev->bitmap) { 1360 /* if adding to array with a bitmap, then we can accept an 1361 * older device ... but not too old. 
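		 * If the event count is older than the bitmap's
		 * events_cleared, the bitmap can no longer describe what
		 * the device is missing, so leave raid_disk at -1 and let
		 * the device be treated as new (full recovery).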
1362 */ 1363 if (ev1 < mddev->bitmap->events_cleared) 1364 return 0; 1365 if (ev1 < mddev->events) 1366 set_bit(Bitmap_sync, &rdev->flags); 1367 } else { 1368 if (ev1 < mddev->events) 1369 /* just a hot-add of a new device, leave raid_disk at -1 */ 1370 return 0; 1371 } 1372 1373 if (mddev->level != LEVEL_MULTIPATH) { 1374 desc = sb->disks + rdev->desc_nr; 1375 1376 if (desc->state & (1<<MD_DISK_FAULTY)) 1377 set_bit(Faulty, &rdev->flags); 1378 else if (desc->state & (1<<MD_DISK_SYNC) /* && 1379 desc->raid_disk < mddev->raid_disks */) { 1380 set_bit(In_sync, &rdev->flags); 1381 rdev->raid_disk = desc->raid_disk; 1382 rdev->saved_raid_disk = desc->raid_disk; 1383 } else if (desc->state & (1<<MD_DISK_ACTIVE)) { 1384 /* active but not in sync implies recovery up to 1385 * reshape position. We don't know exactly where 1386 * that is, so set to zero for now */ 1387 if (mddev->minor_version >= 91) { 1388 rdev->recovery_offset = 0; 1389 rdev->raid_disk = desc->raid_disk; 1390 } 1391 } 1392 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 1393 set_bit(WriteMostly, &rdev->flags); 1394 if (desc->state & (1<<MD_DISK_FAILFAST)) 1395 set_bit(FailFast, &rdev->flags); 1396 } else /* MULTIPATH are always insync */ 1397 set_bit(In_sync, &rdev->flags); 1398 return 0; 1399 } 1400 1401 /* 1402 * sync_super for 0.90.0 1403 */ 1404 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) 1405 { 1406 mdp_super_t *sb; 1407 struct md_rdev *rdev2; 1408 int next_spare = mddev->raid_disks; 1409 1410 /* make rdev->sb match mddev data.. 1411 * 1412 * 1/ zero out disks 1413 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 1414 * 3/ any empty disks < next_spare become removed 1415 * 1416 * disks[0] gets initialised to REMOVED because 1417 * we cannot be sure from other fields if it has 1418 * been initialised or not. 
1419 */ 1420 int i; 1421 int active=0, working=0,failed=0,spare=0,nr_disks=0; 1422 1423 rdev->sb_size = MD_SB_BYTES; 1424 1425 sb = page_address(rdev->sb_page); 1426 1427 memset(sb, 0, sizeof(*sb)); 1428 1429 sb->md_magic = MD_SB_MAGIC; 1430 sb->major_version = mddev->major_version; 1431 sb->patch_version = mddev->patch_version; 1432 sb->gvalid_words = 0; /* ignored */ 1433 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 1434 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 1435 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 1436 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 1437 1438 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 1439 sb->level = mddev->level; 1440 sb->size = mddev->dev_sectors / 2; 1441 sb->raid_disks = mddev->raid_disks; 1442 sb->md_minor = mddev->md_minor; 1443 sb->not_persistent = 0; 1444 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 1445 sb->state = 0; 1446 sb->events_hi = (mddev->events>>32); 1447 sb->events_lo = (u32)mddev->events; 1448 1449 if (mddev->reshape_position == MaxSector) 1450 sb->minor_version = 90; 1451 else { 1452 sb->minor_version = 91; 1453 sb->reshape_position = mddev->reshape_position; 1454 sb->new_level = mddev->new_level; 1455 sb->delta_disks = mddev->delta_disks; 1456 sb->new_layout = mddev->new_layout; 1457 sb->new_chunk = mddev->new_chunk_sectors << 9; 1458 } 1459 mddev->minor_version = sb->minor_version; 1460 if (mddev->in_sync) 1461 { 1462 sb->recovery_cp = mddev->recovery_cp; 1463 sb->cp_events_hi = (mddev->events>>32); 1464 sb->cp_events_lo = (u32)mddev->events; 1465 if (mddev->recovery_cp == MaxSector) 1466 sb->state = (1<< MD_SB_CLEAN); 1467 } else 1468 sb->recovery_cp = 0; 1469 1470 sb->layout = mddev->layout; 1471 sb->chunk_size = mddev->chunk_sectors << 9; 1472 1473 if (mddev->bitmap && mddev->bitmap_info.file == NULL) 1474 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1475 1476 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1477 rdev_for_each(rdev2, mddev) { 1478 mdp_disk_t *d; 1479 int desc_nr; 1480 int is_active = test_bit(In_sync, &rdev2->flags); 1481 1482 if (rdev2->raid_disk >= 0 && 1483 sb->minor_version >= 91) 1484 /* we have nowhere to store the recovery_offset, 1485 * but if it is not below the reshape_position, 1486 * we can piggy-back on that. 
1487 */ 1488 is_active = 1; 1489 if (rdev2->raid_disk < 0 || 1490 test_bit(Faulty, &rdev2->flags)) 1491 is_active = 0; 1492 if (is_active) 1493 desc_nr = rdev2->raid_disk; 1494 else 1495 desc_nr = next_spare++; 1496 rdev2->desc_nr = desc_nr; 1497 d = &sb->disks[rdev2->desc_nr]; 1498 nr_disks++; 1499 d->number = rdev2->desc_nr; 1500 d->major = MAJOR(rdev2->bdev->bd_dev); 1501 d->minor = MINOR(rdev2->bdev->bd_dev); 1502 if (is_active) 1503 d->raid_disk = rdev2->raid_disk; 1504 else 1505 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1506 if (test_bit(Faulty, &rdev2->flags)) 1507 d->state = (1<<MD_DISK_FAULTY); 1508 else if (is_active) { 1509 d->state = (1<<MD_DISK_ACTIVE); 1510 if (test_bit(In_sync, &rdev2->flags)) 1511 d->state |= (1<<MD_DISK_SYNC); 1512 active++; 1513 working++; 1514 } else { 1515 d->state = 0; 1516 spare++; 1517 working++; 1518 } 1519 if (test_bit(WriteMostly, &rdev2->flags)) 1520 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1521 if (test_bit(FailFast, &rdev2->flags)) 1522 d->state |= (1<<MD_DISK_FAILFAST); 1523 } 1524 /* now set the "removed" and "faulty" bits on any missing devices */ 1525 for (i=0 ; i < mddev->raid_disks ; i++) { 1526 mdp_disk_t *d = &sb->disks[i]; 1527 if (d->state == 0 && d->number == 0) { 1528 d->number = i; 1529 d->raid_disk = i; 1530 d->state = (1<<MD_DISK_REMOVED); 1531 d->state |= (1<<MD_DISK_FAULTY); 1532 failed++; 1533 } 1534 } 1535 sb->nr_disks = nr_disks; 1536 sb->active_disks = active; 1537 sb->working_disks = working; 1538 sb->failed_disks = failed; 1539 sb->spare_disks = spare; 1540 1541 sb->this_disk = sb->disks[rdev->desc_nr]; 1542 sb->sb_csum = calc_sb_csum(sb); 1543 } 1544 1545 /* 1546 * rdev_size_change for 0.90.0 1547 */ 1548 static unsigned long long 1549 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1550 { 1551 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1552 return 0; /* component must fit device */ 1553 if (rdev->mddev->bitmap_info.offset) 1554 return 0; /* can't move bitmap */ 1555 rdev->sb_start = calc_dev_sboffset(rdev); 1556 if (!num_sectors || num_sectors > rdev->sb_start) 1557 num_sectors = rdev->sb_start; 1558 /* Limit to 4TB as metadata cannot record more than that. 1559 * 4TB == 2^32 KB, or 2*2^32 sectors. 
1560 */ 1561 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1562 num_sectors = (sector_t)(2ULL << 32) - 2; 1563 do { 1564 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1565 rdev->sb_page); 1566 } while (md_super_wait(rdev->mddev) < 0); 1567 return num_sectors; 1568 } 1569 1570 static int 1571 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1572 { 1573 /* non-zero offset changes not possible with v0.90 */ 1574 return new_offset == 0; 1575 } 1576 1577 /* 1578 * version 1 superblock 1579 */ 1580 1581 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) 1582 { 1583 __le32 disk_csum; 1584 u32 csum; 1585 unsigned long long newcsum; 1586 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1587 __le32 *isuper = (__le32*)sb; 1588 1589 disk_csum = sb->sb_csum; 1590 sb->sb_csum = 0; 1591 newcsum = 0; 1592 for (; size >= 4; size -= 4) 1593 newcsum += le32_to_cpu(*isuper++); 1594 1595 if (size == 2) 1596 newcsum += le16_to_cpu(*(__le16*) isuper); 1597 1598 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1599 sb->sb_csum = disk_csum; 1600 return cpu_to_le32(csum); 1601 } 1602 1603 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1604 { 1605 struct mdp_superblock_1 *sb; 1606 int ret; 1607 sector_t sb_start; 1608 sector_t sectors; 1609 int bmask; 1610 bool spare_disk = true; 1611 1612 /* 1613 * Calculate the position of the superblock in 512byte sectors. 1614 * It is always aligned to a 4K boundary and 1615 * depeding on minor_version, it can be: 1616 * 0: At least 8K, but less than 12K, from end of device 1617 * 1: At start of device 1618 * 2: 4K from start of device. 1619 */ 1620 switch(minor_version) { 1621 case 0: 1622 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2; 1623 sb_start &= ~(sector_t)(4*2-1); 1624 break; 1625 case 1: 1626 sb_start = 0; 1627 break; 1628 case 2: 1629 sb_start = 8; 1630 break; 1631 default: 1632 return -EINVAL; 1633 } 1634 rdev->sb_start = sb_start; 1635 1636 /* superblock is rarely larger than 1K, but it can be larger, 1637 * and it is safe to read 4k, so we do that 1638 */ 1639 ret = read_disk_sb(rdev, 4096); 1640 if (ret) return ret; 1641 1642 sb = page_address(rdev->sb_page); 1643 1644 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1645 sb->major_version != cpu_to_le32(1) || 1646 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1647 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1648 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1649 return -EINVAL; 1650 1651 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1652 pr_warn("md: invalid superblock checksum on %pg\n", 1653 rdev->bdev); 1654 return -EINVAL; 1655 } 1656 if (le64_to_cpu(sb->data_size) < 10) { 1657 pr_warn("md: data_size too small on %pg\n", 1658 rdev->bdev); 1659 return -EINVAL; 1660 } 1661 if (sb->pad0 || 1662 sb->pad3[0] || 1663 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) 1664 /* Some padding is non-zero, might be a new feature */ 1665 return -EINVAL; 1666 1667 rdev->preferred_minor = 0xffff; 1668 rdev->data_offset = le64_to_cpu(sb->data_offset); 1669 rdev->new_data_offset = rdev->data_offset; 1670 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1671 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1672 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1673 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1674 1675 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1676 bmask = 
queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1677 if (rdev->sb_size & bmask) 1678 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1679 1680 if (minor_version 1681 && rdev->data_offset < sb_start + (rdev->sb_size/512)) 1682 return -EINVAL; 1683 if (minor_version 1684 && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) 1685 return -EINVAL; 1686 1687 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) 1688 rdev->desc_nr = -1; 1689 else 1690 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1691 1692 if (!rdev->bb_page) { 1693 rdev->bb_page = alloc_page(GFP_KERNEL); 1694 if (!rdev->bb_page) 1695 return -ENOMEM; 1696 } 1697 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) && 1698 rdev->badblocks.count == 0) { 1699 /* need to load the bad block list. 1700 * Currently we limit it to one page. 1701 */ 1702 s32 offset; 1703 sector_t bb_sector; 1704 __le64 *bbp; 1705 int i; 1706 int sectors = le16_to_cpu(sb->bblog_size); 1707 if (sectors > (PAGE_SIZE / 512)) 1708 return -EINVAL; 1709 offset = le32_to_cpu(sb->bblog_offset); 1710 if (offset == 0) 1711 return -EINVAL; 1712 bb_sector = (long long)offset; 1713 if (!sync_page_io(rdev, bb_sector, sectors << 9, 1714 rdev->bb_page, REQ_OP_READ, true)) 1715 return -EIO; 1716 bbp = (__le64 *)page_address(rdev->bb_page); 1717 rdev->badblocks.shift = sb->bblog_shift; 1718 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { 1719 u64 bb = le64_to_cpu(*bbp); 1720 int count = bb & (0x3ff); 1721 u64 sector = bb >> 10; 1722 sector <<= sb->bblog_shift; 1723 count <<= sb->bblog_shift; 1724 if (bb + 1 == 0) 1725 break; 1726 if (badblocks_set(&rdev->badblocks, sector, count, 1)) 1727 return -EINVAL; 1728 } 1729 } else if (sb->bblog_offset != 0) 1730 rdev->badblocks.shift = 0; 1731 1732 if ((le32_to_cpu(sb->feature_map) & 1733 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) { 1734 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset); 1735 rdev->ppl.size = le16_to_cpu(sb->ppl.size); 1736 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; 1737 } 1738 1739 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && 1740 sb->level != 0) 1741 return -EINVAL; 1742 1743 /* not spare disk, or LEVEL_MULTIPATH */ 1744 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) || 1745 (rdev->desc_nr >= 0 && 1746 rdev->desc_nr < le32_to_cpu(sb->max_dev) && 1747 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1748 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))) 1749 spare_disk = false; 1750 1751 if (!refdev) { 1752 if (!spare_disk) 1753 ret = 1; 1754 else 1755 ret = 0; 1756 } else { 1757 __u64 ev1, ev2; 1758 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); 1759 1760 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1761 sb->level != refsb->level || 1762 sb->layout != refsb->layout || 1763 sb->chunksize != refsb->chunksize) { 1764 pr_warn("md: %pg has strangely different superblock to %pg\n", 1765 rdev->bdev, 1766 refdev->bdev); 1767 return -EINVAL; 1768 } 1769 ev1 = le64_to_cpu(sb->events); 1770 ev2 = le64_to_cpu(refsb->events); 1771 1772 if (!spare_disk && ev1 > ev2) 1773 ret = 1; 1774 else 1775 ret = 0; 1776 } 1777 if (minor_version) 1778 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; 1779 else 1780 sectors = rdev->sb_start; 1781 if (sectors < le64_to_cpu(sb->data_size)) 1782 return -EINVAL; 1783 rdev->sectors = le64_to_cpu(sb->data_size); 1784 return ret; 1785 } 1786 1787 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) 1788 { 1789 struct mdp_superblock_1 *sb = 
page_address(rdev->sb_page); 1790 __u64 ev1 = le64_to_cpu(sb->events); 1791 1792 rdev->raid_disk = -1; 1793 clear_bit(Faulty, &rdev->flags); 1794 clear_bit(In_sync, &rdev->flags); 1795 clear_bit(Bitmap_sync, &rdev->flags); 1796 clear_bit(WriteMostly, &rdev->flags); 1797 1798 if (mddev->raid_disks == 0) { 1799 mddev->major_version = 1; 1800 mddev->patch_version = 0; 1801 mddev->external = 0; 1802 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); 1803 mddev->ctime = le64_to_cpu(sb->ctime); 1804 mddev->utime = le64_to_cpu(sb->utime); 1805 mddev->level = le32_to_cpu(sb->level); 1806 mddev->clevel[0] = 0; 1807 mddev->layout = le32_to_cpu(sb->layout); 1808 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1809 mddev->dev_sectors = le64_to_cpu(sb->size); 1810 mddev->events = ev1; 1811 mddev->bitmap_info.offset = 0; 1812 mddev->bitmap_info.space = 0; 1813 /* Default location for bitmap is 1K after superblock 1814 * using 3K - total of 4K 1815 */ 1816 mddev->bitmap_info.default_offset = 1024 >> 9; 1817 mddev->bitmap_info.default_space = (4096-1024) >> 9; 1818 mddev->reshape_backwards = 0; 1819 1820 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1821 memcpy(mddev->uuid, sb->set_uuid, 16); 1822 1823 mddev->max_disks = (4096-256)/2; 1824 1825 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1826 mddev->bitmap_info.file == NULL) { 1827 mddev->bitmap_info.offset = 1828 (__s32)le32_to_cpu(sb->bitmap_offset); 1829 /* Metadata doesn't record how much space is available. 1830 * For 1.0, we assume we can use up to the superblock 1831 * if before, else to 4K beyond superblock. 1832 * For others, assume no change is possible. 1833 */ 1834 if (mddev->minor_version > 0) 1835 mddev->bitmap_info.space = 0; 1836 else if (mddev->bitmap_info.offset > 0) 1837 mddev->bitmap_info.space = 1838 8 - mddev->bitmap_info.offset; 1839 else 1840 mddev->bitmap_info.space = 1841 -mddev->bitmap_info.offset; 1842 } 1843 1844 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1845 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1846 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1847 mddev->new_level = le32_to_cpu(sb->new_level); 1848 mddev->new_layout = le32_to_cpu(sb->new_layout); 1849 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); 1850 if (mddev->delta_disks < 0 || 1851 (mddev->delta_disks == 0 && 1852 (le32_to_cpu(sb->feature_map) 1853 & MD_FEATURE_RESHAPE_BACKWARDS))) 1854 mddev->reshape_backwards = 1; 1855 } else { 1856 mddev->reshape_position = MaxSector; 1857 mddev->delta_disks = 0; 1858 mddev->new_level = mddev->level; 1859 mddev->new_layout = mddev->layout; 1860 mddev->new_chunk_sectors = mddev->chunk_sectors; 1861 } 1862 1863 if (mddev->level == 0 && 1864 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) 1865 mddev->layout = -1; 1866 1867 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) 1868 set_bit(MD_HAS_JOURNAL, &mddev->flags); 1869 1870 if (le32_to_cpu(sb->feature_map) & 1871 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) { 1872 if (le32_to_cpu(sb->feature_map) & 1873 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL)) 1874 return -EINVAL; 1875 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) && 1876 (le32_to_cpu(sb->feature_map) & 1877 MD_FEATURE_MULTIPLE_PPLS)) 1878 return -EINVAL; 1879 set_bit(MD_HAS_PPL, &mddev->flags); 1880 } 1881 } else if (mddev->pers == NULL) { 1882 /* Insist of good event counter while assembling, except for 1883 * spares (which don't need an event count) */ 1884 ++ev1; 1885 if (rdev->desc_nr >= 0 && 1886 rdev->desc_nr 
< le32_to_cpu(sb->max_dev) && 1887 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1888 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) 1889 if (ev1 < mddev->events) 1890 return -EINVAL; 1891 } else if (mddev->bitmap) { 1892 /* If adding to array with a bitmap, then we can accept an 1893 * older device, but not too old. 1894 */ 1895 if (ev1 < mddev->bitmap->events_cleared) 1896 return 0; 1897 if (ev1 < mddev->events) 1898 set_bit(Bitmap_sync, &rdev->flags); 1899 } else { 1900 if (ev1 < mddev->events) 1901 /* just a hot-add of a new device, leave raid_disk at -1 */ 1902 return 0; 1903 } 1904 if (mddev->level != LEVEL_MULTIPATH) { 1905 int role; 1906 if (rdev->desc_nr < 0 || 1907 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { 1908 role = MD_DISK_ROLE_SPARE; 1909 rdev->desc_nr = -1; 1910 } else 1911 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1912 switch(role) { 1913 case MD_DISK_ROLE_SPARE: /* spare */ 1914 break; 1915 case MD_DISK_ROLE_FAULTY: /* faulty */ 1916 set_bit(Faulty, &rdev->flags); 1917 break; 1918 case MD_DISK_ROLE_JOURNAL: /* journal device */ 1919 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { 1920 /* journal device without journal feature */ 1921 pr_warn("md: journal device provided without journal feature, ignoring the device\n"); 1922 return -EINVAL; 1923 } 1924 set_bit(Journal, &rdev->flags); 1925 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1926 rdev->raid_disk = 0; 1927 break; 1928 default: 1929 rdev->saved_raid_disk = role; 1930 if ((le32_to_cpu(sb->feature_map) & 1931 MD_FEATURE_RECOVERY_OFFSET)) { 1932 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 1933 if (!(le32_to_cpu(sb->feature_map) & 1934 MD_FEATURE_RECOVERY_BITMAP)) 1935 rdev->saved_raid_disk = -1; 1936 } else { 1937 /* 1938 * If the array is FROZEN, then the device can't 1939 * be in_sync with rest of array. 1940 */ 1941 if (!test_bit(MD_RECOVERY_FROZEN, 1942 &mddev->recovery)) 1943 set_bit(In_sync, &rdev->flags); 1944 } 1945 rdev->raid_disk = role; 1946 break; 1947 } 1948 if (sb->devflags & WriteMostly1) 1949 set_bit(WriteMostly, &rdev->flags); 1950 if (sb->devflags & FailFast1) 1951 set_bit(FailFast, &rdev->flags); 1952 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) 1953 set_bit(Replacement, &rdev->flags); 1954 } else /* MULTIPATH are always insync */ 1955 set_bit(In_sync, &rdev->flags); 1956 1957 return 0; 1958 } 1959 1960 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 1961 { 1962 struct mdp_superblock_1 *sb; 1963 struct md_rdev *rdev2; 1964 int max_dev, i; 1965 /* make rdev->sb match mddev and rdev data. 
*/ 1966 1967 sb = page_address(rdev->sb_page); 1968 1969 sb->feature_map = 0; 1970 sb->pad0 = 0; 1971 sb->recovery_offset = cpu_to_le64(0); 1972 memset(sb->pad3, 0, sizeof(sb->pad3)); 1973 1974 sb->utime = cpu_to_le64((__u64)mddev->utime); 1975 sb->events = cpu_to_le64(mddev->events); 1976 if (mddev->in_sync) 1977 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1978 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 1979 sb->resync_offset = cpu_to_le64(MaxSector); 1980 else 1981 sb->resync_offset = cpu_to_le64(0); 1982 1983 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1984 1985 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1986 sb->size = cpu_to_le64(mddev->dev_sectors); 1987 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1988 sb->level = cpu_to_le32(mddev->level); 1989 sb->layout = cpu_to_le32(mddev->layout); 1990 if (test_bit(FailFast, &rdev->flags)) 1991 sb->devflags |= FailFast1; 1992 else 1993 sb->devflags &= ~FailFast1; 1994 1995 if (test_bit(WriteMostly, &rdev->flags)) 1996 sb->devflags |= WriteMostly1; 1997 else 1998 sb->devflags &= ~WriteMostly1; 1999 sb->data_offset = cpu_to_le64(rdev->data_offset); 2000 sb->data_size = cpu_to_le64(rdev->sectors); 2001 2002 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 2003 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 2004 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 2005 } 2006 2007 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && 2008 !test_bit(In_sync, &rdev->flags)) { 2009 sb->feature_map |= 2010 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 2011 sb->recovery_offset = 2012 cpu_to_le64(rdev->recovery_offset); 2013 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 2014 sb->feature_map |= 2015 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 2016 } 2017 /* Note: recovery_offset and journal_tail share space */ 2018 if (test_bit(Journal, &rdev->flags)) 2019 sb->journal_tail = cpu_to_le64(rdev->journal_tail); 2020 if (test_bit(Replacement, &rdev->flags)) 2021 sb->feature_map |= 2022 cpu_to_le32(MD_FEATURE_REPLACEMENT); 2023 2024 if (mddev->reshape_position != MaxSector) { 2025 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 2026 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 2027 sb->new_layout = cpu_to_le32(mddev->new_layout); 2028 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 2029 sb->new_level = cpu_to_le32(mddev->new_level); 2030 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 2031 if (mddev->delta_disks == 0 && 2032 mddev->reshape_backwards) 2033 sb->feature_map 2034 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 2035 if (rdev->new_data_offset != rdev->data_offset) { 2036 sb->feature_map 2037 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 2038 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 2039 - rdev->data_offset)); 2040 } 2041 } 2042 2043 if (mddev_is_clustered(mddev)) 2044 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); 2045 2046 if (rdev->badblocks.count == 0) 2047 /* Nothing to do for bad blocks*/ ; 2048 else if (sb->bblog_offset == 0) 2049 /* Cannot record bad blocks on this device */ 2050 md_error(mddev, rdev); 2051 else { 2052 struct badblocks *bb = &rdev->badblocks; 2053 __le64 *bbp = (__le64 *)page_address(rdev->bb_page); 2054 u64 *p = bb->page; 2055 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 2056 if (bb->changed) { 2057 unsigned seq; 2058 2059 retry: 2060 seq = read_seqbegin(&bb->lock); 2061 2062 memset(bbp, 0xff, PAGE_SIZE); 2063 2064 for (i = 0 ; i < bb->count ; i++) { 2065 u64 
internal_bb = p[i]; 2066 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 2067 | BB_LEN(internal_bb)); 2068 bbp[i] = cpu_to_le64(store_bb); 2069 } 2070 bb->changed = 0; 2071 if (read_seqretry(&bb->lock, seq)) 2072 goto retry; 2073 2074 bb->sector = (rdev->sb_start + 2075 (int)le32_to_cpu(sb->bblog_offset)); 2076 bb->size = le16_to_cpu(sb->bblog_size); 2077 } 2078 } 2079 2080 max_dev = 0; 2081 rdev_for_each(rdev2, mddev) 2082 if (rdev2->desc_nr+1 > max_dev) 2083 max_dev = rdev2->desc_nr+1; 2084 2085 if (max_dev > le32_to_cpu(sb->max_dev)) { 2086 int bmask; 2087 sb->max_dev = cpu_to_le32(max_dev); 2088 rdev->sb_size = max_dev * 2 + 256; 2089 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 2090 if (rdev->sb_size & bmask) 2091 rdev->sb_size = (rdev->sb_size | bmask) + 1; 2092 } else 2093 max_dev = le32_to_cpu(sb->max_dev); 2094 2095 for (i=0; i<max_dev;i++) 2096 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2097 2098 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 2099 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 2100 2101 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 2102 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 2103 sb->feature_map |= 2104 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 2105 else 2106 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 2107 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 2108 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 2109 } 2110 2111 rdev_for_each(rdev2, mddev) { 2112 i = rdev2->desc_nr; 2113 if (test_bit(Faulty, &rdev2->flags)) 2114 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 2115 else if (test_bit(In_sync, &rdev2->flags)) 2116 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2117 else if (test_bit(Journal, &rdev2->flags)) 2118 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 2119 else if (rdev2->raid_disk >= 0) 2120 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2121 else 2122 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2123 } 2124 2125 sb->sb_csum = calc_sb_1_csum(sb); 2126 } 2127 2128 static sector_t super_1_choose_bm_space(sector_t dev_size) 2129 { 2130 sector_t bm_space; 2131 2132 /* if the device is bigger than 8Gig, save 64k for bitmap 2133 * usage, if bigger than 200Gig, save 128k 2134 */ 2135 if (dev_size < 64*2) 2136 bm_space = 0; 2137 else if (dev_size - 64*2 >= 200*1024*1024*2) 2138 bm_space = 128*2; 2139 else if (dev_size - 4*2 > 8*1024*1024*2) 2140 bm_space = 64*2; 2141 else 2142 bm_space = 4*2; 2143 return bm_space; 2144 } 2145 2146 static unsigned long long 2147 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 2148 { 2149 struct mdp_superblock_1 *sb; 2150 sector_t max_sectors; 2151 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 2152 return 0; /* component must fit device */ 2153 if (rdev->data_offset != rdev->new_data_offset) 2154 return 0; /* too confusing */ 2155 if (rdev->sb_start < rdev->data_offset) { 2156 /* minor versions 1 and 2; superblock before data */ 2157 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; 2158 if (!num_sectors || num_sectors > max_sectors) 2159 num_sectors = max_sectors; 2160 } else if (rdev->mddev->bitmap_info.offset) { 2161 /* minor version 0 with bitmap we can't move */ 2162 return 0; 2163 } else { 2164 /* minor version 0; superblock after data */ 2165 sector_t sb_start, bm_space; 2166 sector_t dev_size = bdev_nr_sectors(rdev->bdev); 2167 2168 /* 8K is for superblock */ 2169 sb_start = dev_size - 8*2; 2170 sb_start &= ~(sector_t)(4*2 - 1); 2171 2172 bm_space = super_1_choose_bm_space(dev_size); 2173 2174 /* Space that 
can be used to store date needs to decrease 2175 * superblock bitmap space and bad block space(4K) 2176 */ 2177 max_sectors = sb_start - bm_space - 4*2; 2178 2179 if (!num_sectors || num_sectors > max_sectors) 2180 num_sectors = max_sectors; 2181 rdev->sb_start = sb_start; 2182 } 2183 sb = page_address(rdev->sb_page); 2184 sb->data_size = cpu_to_le64(num_sectors); 2185 sb->super_offset = cpu_to_le64(rdev->sb_start); 2186 sb->sb_csum = calc_sb_1_csum(sb); 2187 do { 2188 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 2189 rdev->sb_page); 2190 } while (md_super_wait(rdev->mddev) < 0); 2191 return num_sectors; 2192 2193 } 2194 2195 static int 2196 super_1_allow_new_offset(struct md_rdev *rdev, 2197 unsigned long long new_offset) 2198 { 2199 /* All necessary checks on new >= old have been done */ 2200 struct bitmap *bitmap; 2201 if (new_offset >= rdev->data_offset) 2202 return 1; 2203 2204 /* with 1.0 metadata, there is no metadata to tread on 2205 * so we can always move back */ 2206 if (rdev->mddev->minor_version == 0) 2207 return 1; 2208 2209 /* otherwise we must be sure not to step on 2210 * any metadata, so stay: 2211 * 36K beyond start of superblock 2212 * beyond end of badblocks 2213 * beyond write-intent bitmap 2214 */ 2215 if (rdev->sb_start + (32+4)*2 > new_offset) 2216 return 0; 2217 bitmap = rdev->mddev->bitmap; 2218 if (bitmap && !rdev->mddev->bitmap_info.file && 2219 rdev->sb_start + rdev->mddev->bitmap_info.offset + 2220 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) 2221 return 0; 2222 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2223 return 0; 2224 2225 return 1; 2226 } 2227 2228 static struct super_type super_types[] = { 2229 [0] = { 2230 .name = "0.90.0", 2231 .owner = THIS_MODULE, 2232 .load_super = super_90_load, 2233 .validate_super = super_90_validate, 2234 .sync_super = super_90_sync, 2235 .rdev_size_change = super_90_rdev_size_change, 2236 .allow_new_offset = super_90_allow_new_offset, 2237 }, 2238 [1] = { 2239 .name = "md-1", 2240 .owner = THIS_MODULE, 2241 .load_super = super_1_load, 2242 .validate_super = super_1_validate, 2243 .sync_super = super_1_sync, 2244 .rdev_size_change = super_1_rdev_size_change, 2245 .allow_new_offset = super_1_allow_new_offset, 2246 }, 2247 }; 2248 2249 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2250 { 2251 if (mddev->sync_super) { 2252 mddev->sync_super(mddev, rdev); 2253 return; 2254 } 2255 2256 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2257 2258 super_types[mddev->major_version].sync_super(mddev, rdev); 2259 } 2260 2261 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2262 { 2263 struct md_rdev *rdev, *rdev2; 2264 2265 rcu_read_lock(); 2266 rdev_for_each_rcu(rdev, mddev1) { 2267 if (test_bit(Faulty, &rdev->flags) || 2268 test_bit(Journal, &rdev->flags) || 2269 rdev->raid_disk == -1) 2270 continue; 2271 rdev_for_each_rcu(rdev2, mddev2) { 2272 if (test_bit(Faulty, &rdev2->flags) || 2273 test_bit(Journal, &rdev2->flags) || 2274 rdev2->raid_disk == -1) 2275 continue; 2276 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) { 2277 rcu_read_unlock(); 2278 return 1; 2279 } 2280 } 2281 } 2282 rcu_read_unlock(); 2283 return 0; 2284 } 2285 2286 static LIST_HEAD(pending_raid_disks); 2287 2288 /* 2289 * Try to register data integrity profile for an mddev 2290 * 2291 * This is called when an array is started and after a disk has been kicked 2292 * from the array. 
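 * (kicking a non-conforming device may be exactly what makes the
 * remaining profiles consistent, hence the re-check at that point)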
It only succeeds if all working and active component devices 2293 * are integrity capable with matching profiles. 2294 */ 2295 int md_integrity_register(struct mddev *mddev) 2296 { 2297 struct md_rdev *rdev, *reference = NULL; 2298 2299 if (list_empty(&mddev->disks)) 2300 return 0; /* nothing to do */ 2301 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 2302 return 0; /* shouldn't register, or already is */ 2303 rdev_for_each(rdev, mddev) { 2304 /* skip spares and non-functional disks */ 2305 if (test_bit(Faulty, &rdev->flags)) 2306 continue; 2307 if (rdev->raid_disk < 0) 2308 continue; 2309 if (!reference) { 2310 /* Use the first rdev as the reference */ 2311 reference = rdev; 2312 continue; 2313 } 2314 /* does this rdev's profile match the reference profile? */ 2315 if (blk_integrity_compare(reference->bdev->bd_disk, 2316 rdev->bdev->bd_disk) < 0) 2317 return -EINVAL; 2318 } 2319 if (!reference || !bdev_get_integrity(reference->bdev)) 2320 return 0; 2321 /* 2322 * All component devices are integrity capable and have matching 2323 * profiles, register the common profile for the md device. 2324 */ 2325 blk_integrity_register(mddev->gendisk, 2326 bdev_get_integrity(reference->bdev)); 2327 2328 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2329 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || 2330 (mddev->level != 1 && mddev->level != 10 && 2331 bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) { 2332 /* 2333 * No need to handle the failure of bioset_integrity_create, 2334 * because the function is called by md_run() -> pers->run(), 2335 * md_run calls bioset_exit -> bioset_integrity_free in case 2336 * of failure case. 2337 */ 2338 pr_err("md: failed to create integrity pool for %s\n", 2339 mdname(mddev)); 2340 return -EINVAL; 2341 } 2342 return 0; 2343 } 2344 EXPORT_SYMBOL(md_integrity_register); 2345 2346 /* 2347 * Attempt to add an rdev, but only if it is consistent with the current 2348 * integrity profile 2349 */ 2350 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2351 { 2352 struct blk_integrity *bi_mddev; 2353 2354 if (!mddev->gendisk) 2355 return 0; 2356 2357 bi_mddev = blk_get_integrity(mddev->gendisk); 2358 2359 if (!bi_mddev) /* nothing to do */ 2360 return 0; 2361 2362 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2363 pr_err("%s: incompatible integrity profile for %pg\n", 2364 mdname(mddev), rdev->bdev); 2365 return -ENXIO; 2366 } 2367 2368 return 0; 2369 } 2370 EXPORT_SYMBOL(md_integrity_add_rdev); 2371 2372 static bool rdev_read_only(struct md_rdev *rdev) 2373 { 2374 return bdev_read_only(rdev->bdev) || 2375 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev)); 2376 } 2377 2378 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2379 { 2380 char b[BDEVNAME_SIZE]; 2381 int err; 2382 2383 /* prevent duplicates */ 2384 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2385 return -EEXIST; 2386 2387 if (rdev_read_only(rdev) && mddev->pers) 2388 return -EROFS; 2389 2390 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2391 if (!test_bit(Journal, &rdev->flags) && 2392 rdev->sectors && 2393 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2394 if (mddev->pers) { 2395 /* Cannot change size, so fail 2396 * If mddev->level <= 0, then we don't care 2397 * about aligning sizes (e.g. linear) 2398 */ 2399 if (mddev->level > 0) 2400 return -ENOSPC; 2401 } else 2402 mddev->dev_sectors = rdev->sectors; 2403 } 2404 2405 /* Verify rdev->desc_nr is unique. 
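 * (desc_nr is this device's slot in the superblock's device table)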
2406 * If it is -1, assign a free number, else 2407 * check number is not in use 2408 */ 2409 rcu_read_lock(); 2410 if (rdev->desc_nr < 0) { 2411 int choice = 0; 2412 if (mddev->pers) 2413 choice = mddev->raid_disks; 2414 while (md_find_rdev_nr_rcu(mddev, choice)) 2415 choice++; 2416 rdev->desc_nr = choice; 2417 } else { 2418 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2419 rcu_read_unlock(); 2420 return -EBUSY; 2421 } 2422 } 2423 rcu_read_unlock(); 2424 if (!test_bit(Journal, &rdev->flags) && 2425 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2426 pr_warn("md: %s: array is limited to %d devices\n", 2427 mdname(mddev), mddev->max_disks); 2428 return -EBUSY; 2429 } 2430 snprintf(b, sizeof(b), "%pg", rdev->bdev); 2431 strreplace(b, '/', '!'); 2432 2433 rdev->mddev = mddev; 2434 pr_debug("md: bind<%s>\n", b); 2435 2436 if (mddev->raid_disks) 2437 mddev_create_serial_pool(mddev, rdev, false); 2438 2439 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2440 goto fail; 2441 2442 /* failure here is OK */ 2443 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block"); 2444 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2445 rdev->sysfs_unack_badblocks = 2446 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks"); 2447 rdev->sysfs_badblocks = 2448 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks"); 2449 2450 list_add_rcu(&rdev->same_set, &mddev->disks); 2451 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2452 2453 /* May as well allow recovery to be retried once */ 2454 mddev->recovery_disabled++; 2455 2456 return 0; 2457 2458 fail: 2459 pr_warn("md: failed to register dev-%s for %s\n", 2460 b, mdname(mddev)); 2461 return err; 2462 } 2463 2464 static void rdev_delayed_delete(struct work_struct *ws) 2465 { 2466 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2467 kobject_del(&rdev->kobj); 2468 kobject_put(&rdev->kobj); 2469 } 2470 2471 static void unbind_rdev_from_array(struct md_rdev *rdev) 2472 { 2473 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2474 list_del_rcu(&rdev->same_set); 2475 pr_debug("md: unbind<%pg>\n", rdev->bdev); 2476 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 2477 rdev->mddev = NULL; 2478 sysfs_remove_link(&rdev->kobj, "block"); 2479 sysfs_put(rdev->sysfs_state); 2480 sysfs_put(rdev->sysfs_unack_badblocks); 2481 sysfs_put(rdev->sysfs_badblocks); 2482 rdev->sysfs_state = NULL; 2483 rdev->sysfs_unack_badblocks = NULL; 2484 rdev->sysfs_badblocks = NULL; 2485 rdev->badblocks.count = 0; 2486 /* We need to delay this, otherwise we can deadlock when 2487 * writing to 'remove' to "dev/state". We also need 2488 * to delay it due to rcu usage. 
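 * The kobject teardown is therefore deferred to rdev_delayed_delete()
 * on md_rdev_misc_wq, after synchronize_rcu() has ensured that no
 * reader still sees this rdev on the mddev->disks list.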
2489 */ 2490 synchronize_rcu(); 2491 INIT_WORK(&rdev->del_work, rdev_delayed_delete); 2492 kobject_get(&rdev->kobj); 2493 queue_work(md_rdev_misc_wq, &rdev->del_work); 2494 } 2495 2496 void md_autodetect_dev(dev_t dev); 2497 2498 static void export_rdev(struct md_rdev *rdev) 2499 { 2500 pr_debug("md: export_rdev(%pg)\n", rdev->bdev); 2501 md_rdev_clear(rdev); 2502 #ifndef MODULE 2503 if (test_bit(AutoDetected, &rdev->flags)) 2504 md_autodetect_dev(rdev->bdev->bd_dev); 2505 #endif 2506 blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2507 rdev->bdev = NULL; 2508 kobject_put(&rdev->kobj); 2509 } 2510 2511 static void md_kick_rdev_from_array(struct md_rdev *rdev) 2512 { 2513 unbind_rdev_from_array(rdev); 2514 export_rdev(rdev); 2515 } 2516 2517 static void export_array(struct mddev *mddev) 2518 { 2519 struct md_rdev *rdev; 2520 2521 while (!list_empty(&mddev->disks)) { 2522 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2523 same_set); 2524 md_kick_rdev_from_array(rdev); 2525 } 2526 mddev->raid_disks = 0; 2527 mddev->major_version = 0; 2528 } 2529 2530 static bool set_in_sync(struct mddev *mddev) 2531 { 2532 lockdep_assert_held(&mddev->lock); 2533 if (!mddev->in_sync) { 2534 mddev->sync_checkers++; 2535 spin_unlock(&mddev->lock); 2536 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2537 spin_lock(&mddev->lock); 2538 if (!mddev->in_sync && 2539 percpu_ref_is_zero(&mddev->writes_pending)) { 2540 mddev->in_sync = 1; 2541 /* 2542 * Ensure ->in_sync is visible before we clear 2543 * ->sync_checkers. 2544 */ 2545 smp_mb(); 2546 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2547 sysfs_notify_dirent_safe(mddev->sysfs_state); 2548 } 2549 if (--mddev->sync_checkers == 0) 2550 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2551 } 2552 if (mddev->safemode == 1) 2553 mddev->safemode = 0; 2554 return mddev->in_sync; 2555 } 2556 2557 static void sync_sbs(struct mddev *mddev, int nospares) 2558 { 2559 /* Update each superblock (in-memory image), but 2560 * if we are allowed to, skip spares which already 2561 * have the right event counter, or have one earlier 2562 * (which would mean they aren't being marked as dirty 2563 * with the rest of the array) 2564 */ 2565 struct md_rdev *rdev; 2566 rdev_for_each(rdev, mddev) { 2567 if (rdev->sb_events == mddev->events || 2568 (nospares && 2569 rdev->raid_disk < 0 && 2570 rdev->sb_events+1 == mddev->events)) { 2571 /* Don't update this superblock */ 2572 rdev->sb_loaded = 2; 2573 } else { 2574 sync_super(mddev, rdev); 2575 rdev->sb_loaded = 1; 2576 } 2577 } 2578 } 2579 2580 static bool does_sb_need_changing(struct mddev *mddev) 2581 { 2582 struct md_rdev *rdev = NULL, *iter; 2583 struct mdp_superblock_1 *sb; 2584 int role; 2585 2586 /* Find a good rdev */ 2587 rdev_for_each(iter, mddev) 2588 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { 2589 rdev = iter; 2590 break; 2591 } 2592 2593 /* No good device found. */ 2594 if (!rdev) 2595 return false; 2596 2597 sb = page_address(rdev->sb_page); 2598 /* Check if a device has become faulty or a spare become active */ 2599 rdev_for_each(rdev, mddev) { 2600 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2601 /* Device activated? */ 2602 if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 && 2603 !test_bit(Faulty, &rdev->flags)) 2604 return true; 2605 /* Device turned faulty? 
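 * (i.e. the superblock still records an active role for a device
 * that is now marked Faulty)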
*/ 2606 if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX)) 2607 return true; 2608 } 2609 2610 /* Check if any mddev parameters have changed */ 2611 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2612 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2613 (mddev->layout != le32_to_cpu(sb->layout)) || 2614 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2615 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2616 return true; 2617 2618 return false; 2619 } 2620 2621 void md_update_sb(struct mddev *mddev, int force_change) 2622 { 2623 struct md_rdev *rdev; 2624 int sync_req; 2625 int nospares = 0; 2626 int any_badblocks_changed = 0; 2627 int ret = -1; 2628 2629 if (!md_is_rdwr(mddev)) { 2630 if (force_change) 2631 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2632 return; 2633 } 2634 2635 repeat: 2636 if (mddev_is_clustered(mddev)) { 2637 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2638 force_change = 1; 2639 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2640 nospares = 1; 2641 ret = md_cluster_ops->metadata_update_start(mddev); 2642 /* Has someone else has updated the sb */ 2643 if (!does_sb_need_changing(mddev)) { 2644 if (ret == 0) 2645 md_cluster_ops->metadata_update_cancel(mddev); 2646 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2647 BIT(MD_SB_CHANGE_DEVS) | 2648 BIT(MD_SB_CHANGE_CLEAN)); 2649 return; 2650 } 2651 } 2652 2653 /* 2654 * First make sure individual recovery_offsets are correct 2655 * curr_resync_completed can only be used during recovery. 2656 * During reshape/resync it might use array-addresses rather 2657 * that device addresses. 2658 */ 2659 rdev_for_each(rdev, mddev) { 2660 if (rdev->raid_disk >= 0 && 2661 mddev->delta_disks >= 0 && 2662 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 2663 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && 2664 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2665 !test_bit(Journal, &rdev->flags) && 2666 !test_bit(In_sync, &rdev->flags) && 2667 mddev->curr_resync_completed > rdev->recovery_offset) 2668 rdev->recovery_offset = mddev->curr_resync_completed; 2669 2670 } 2671 if (!mddev->persistent) { 2672 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2673 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2674 if (!mddev->external) { 2675 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 2676 rdev_for_each(rdev, mddev) { 2677 if (rdev->badblocks.changed) { 2678 rdev->badblocks.changed = 0; 2679 ack_all_badblocks(&rdev->badblocks); 2680 md_error(mddev, rdev); 2681 } 2682 clear_bit(Blocked, &rdev->flags); 2683 clear_bit(BlockedBadBlocks, &rdev->flags); 2684 wake_up(&rdev->blocked_wait); 2685 } 2686 } 2687 wake_up(&mddev->sb_wait); 2688 return; 2689 } 2690 2691 spin_lock(&mddev->lock); 2692 2693 mddev->utime = ktime_get_real_seconds(); 2694 2695 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2696 force_change = 1; 2697 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2698 /* just a clean<-> dirty transition, possibly leave spares alone, 2699 * though if events isn't the right even/odd, we will have to do 2700 * spares after all 2701 */ 2702 nospares = 1; 2703 if (force_change) 2704 nospares = 0; 2705 if (mddev->degraded) 2706 /* If the array is degraded, then skipping spares is both 2707 * dangerous and fairly pointless. 2708 * Dangerous because a device that was removed from the array 2709 * might have a event_count that still looks up-to-date, 2710 * so it can be re-added without a resync. 
2711 * Pointless because if there are any spares to skip, 2712 * then a recovery will happen and soon that array won't 2713 * be degraded any more and the spare can go back to sleep then. 2714 */ 2715 nospares = 0; 2716 2717 sync_req = mddev->in_sync; 2718 2719 /* If this is just a dirty<->clean transition, and the array is clean 2720 * and 'events' is odd, we can roll back to the previous clean state */ 2721 if (nospares 2722 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2723 && mddev->can_decrease_events 2724 && mddev->events != 1) { 2725 mddev->events--; 2726 mddev->can_decrease_events = 0; 2727 } else { 2728 /* otherwise we have to go forward and ... */ 2729 mddev->events ++; 2730 mddev->can_decrease_events = nospares; 2731 } 2732 2733 /* 2734 * This 64-bit counter should never wrap. 2735 * Either we are in around ~1 trillion A.C., assuming 2736 * 1 reboot per second, or we have a bug... 2737 */ 2738 WARN_ON(mddev->events == 0); 2739 2740 rdev_for_each(rdev, mddev) { 2741 if (rdev->badblocks.changed) 2742 any_badblocks_changed++; 2743 if (test_bit(Faulty, &rdev->flags)) 2744 set_bit(FaultRecorded, &rdev->flags); 2745 } 2746 2747 sync_sbs(mddev, nospares); 2748 spin_unlock(&mddev->lock); 2749 2750 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2751 mdname(mddev), mddev->in_sync); 2752 2753 if (mddev->queue) 2754 blk_add_trace_msg(mddev->queue, "md md_update_sb"); 2755 rewrite: 2756 md_bitmap_update_sb(mddev->bitmap); 2757 rdev_for_each(rdev, mddev) { 2758 if (rdev->sb_loaded != 1) 2759 continue; /* no noise on spare devices */ 2760 2761 if (!test_bit(Faulty, &rdev->flags)) { 2762 md_super_write(mddev,rdev, 2763 rdev->sb_start, rdev->sb_size, 2764 rdev->sb_page); 2765 pr_debug("md: (write) %pg's sb offset: %llu\n", 2766 rdev->bdev, 2767 (unsigned long long)rdev->sb_start); 2768 rdev->sb_events = mddev->events; 2769 if (rdev->badblocks.size) { 2770 md_super_write(mddev, rdev, 2771 rdev->badblocks.sector, 2772 rdev->badblocks.size << 9, 2773 rdev->bb_page); 2774 rdev->badblocks.size = 0; 2775 } 2776 2777 } else 2778 pr_debug("md: %pg (skipping faulty)\n", 2779 rdev->bdev); 2780 2781 if (mddev->level == LEVEL_MULTIPATH) 2782 /* only need to write one superblock... 
*/ 2783 break; 2784 } 2785 if (md_super_wait(mddev) < 0) 2786 goto rewrite; 2787 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ 2788 2789 if (mddev_is_clustered(mddev) && ret == 0) 2790 md_cluster_ops->metadata_update_finish(mddev); 2791 2792 if (mddev->in_sync != sync_req || 2793 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2794 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) 2795 /* have to write it out again */ 2796 goto repeat; 2797 wake_up(&mddev->sb_wait); 2798 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2799 sysfs_notify_dirent_safe(mddev->sysfs_completed); 2800 2801 rdev_for_each(rdev, mddev) { 2802 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2803 clear_bit(Blocked, &rdev->flags); 2804 2805 if (any_badblocks_changed) 2806 ack_all_badblocks(&rdev->badblocks); 2807 clear_bit(BlockedBadBlocks, &rdev->flags); 2808 wake_up(&rdev->blocked_wait); 2809 } 2810 } 2811 EXPORT_SYMBOL(md_update_sb); 2812 2813 static int add_bound_rdev(struct md_rdev *rdev) 2814 { 2815 struct mddev *mddev = rdev->mddev; 2816 int err = 0; 2817 bool add_journal = test_bit(Journal, &rdev->flags); 2818 2819 if (!mddev->pers->hot_remove_disk || add_journal) { 2820 /* If there is hot_add_disk but no hot_remove_disk 2821 * then added disks for geometry changes, 2822 * and should be added immediately. 2823 */ 2824 super_types[mddev->major_version]. 2825 validate_super(mddev, rdev); 2826 if (add_journal) 2827 mddev_suspend(mddev); 2828 err = mddev->pers->hot_add_disk(mddev, rdev); 2829 if (add_journal) 2830 mddev_resume(mddev); 2831 if (err) { 2832 md_kick_rdev_from_array(rdev); 2833 return err; 2834 } 2835 } 2836 sysfs_notify_dirent_safe(rdev->sysfs_state); 2837 2838 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2839 if (mddev->degraded) 2840 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2841 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2842 md_new_event(); 2843 md_wakeup_thread(mddev->thread); 2844 return 0; 2845 } 2846 2847 /* words written to sysfs files may, or may not, be \n terminated. 2848 * We want to accept with case. For this we use cmd_match. 2849 */ 2850 static int cmd_match(const char *cmd, const char *str) 2851 { 2852 /* See if cmd, written into a sysfs file, matches 2853 * str. 
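 * e.g. cmd_match("remove\n", "remove") and cmd_match("remove", "remove")
 * both return 1, while cmd_match("removed", "remove") returns 0.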
They must either be the same, or cmd can 2854 * have a trailing newline 2855 */ 2856 while (*cmd && *str && *cmd == *str) { 2857 cmd++; 2858 str++; 2859 } 2860 if (*cmd == '\n') 2861 cmd++; 2862 if (*str || *cmd) 2863 return 0; 2864 return 1; 2865 } 2866 2867 struct rdev_sysfs_entry { 2868 struct attribute attr; 2869 ssize_t (*show)(struct md_rdev *, char *); 2870 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2871 }; 2872 2873 static ssize_t 2874 state_show(struct md_rdev *rdev, char *page) 2875 { 2876 char *sep = ","; 2877 size_t len = 0; 2878 unsigned long flags = READ_ONCE(rdev->flags); 2879 2880 if (test_bit(Faulty, &flags) || 2881 (!test_bit(ExternalBbl, &flags) && 2882 rdev->badblocks.unacked_exist)) 2883 len += sprintf(page+len, "faulty%s", sep); 2884 if (test_bit(In_sync, &flags)) 2885 len += sprintf(page+len, "in_sync%s", sep); 2886 if (test_bit(Journal, &flags)) 2887 len += sprintf(page+len, "journal%s", sep); 2888 if (test_bit(WriteMostly, &flags)) 2889 len += sprintf(page+len, "write_mostly%s", sep); 2890 if (test_bit(Blocked, &flags) || 2891 (rdev->badblocks.unacked_exist 2892 && !test_bit(Faulty, &flags))) 2893 len += sprintf(page+len, "blocked%s", sep); 2894 if (!test_bit(Faulty, &flags) && 2895 !test_bit(Journal, &flags) && 2896 !test_bit(In_sync, &flags)) 2897 len += sprintf(page+len, "spare%s", sep); 2898 if (test_bit(WriteErrorSeen, &flags)) 2899 len += sprintf(page+len, "write_error%s", sep); 2900 if (test_bit(WantReplacement, &flags)) 2901 len += sprintf(page+len, "want_replacement%s", sep); 2902 if (test_bit(Replacement, &flags)) 2903 len += sprintf(page+len, "replacement%s", sep); 2904 if (test_bit(ExternalBbl, &flags)) 2905 len += sprintf(page+len, "external_bbl%s", sep); 2906 if (test_bit(FailFast, &flags)) 2907 len += sprintf(page+len, "failfast%s", sep); 2908 2909 if (len) 2910 len -= strlen(sep); 2911 2912 return len+sprintf(page+len, "\n"); 2913 } 2914 2915 static ssize_t 2916 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2917 { 2918 /* can write 2919 * faulty - simulates an error 2920 * remove - disconnects the device 2921 * writemostly - sets write_mostly 2922 * -writemostly - clears write_mostly 2923 * blocked - sets the Blocked flags 2924 * -blocked - clears the Blocked and possibly simulates an error 2925 * insync - sets Insync providing device isn't active 2926 * -insync - clear Insync for a device with a slot assigned, 2927 * so that it gets rebuilt based on bitmap 2928 * write_error - sets WriteErrorSeen 2929 * -write_error - clears WriteErrorSeen 2930 * {,-}failfast - set/clear FailFast 2931 */ 2932 2933 struct mddev *mddev = rdev->mddev; 2934 int err = -EINVAL; 2935 bool need_update_sb = false; 2936 2937 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2938 md_error(rdev->mddev, rdev); 2939 2940 if (test_bit(MD_BROKEN, &rdev->mddev->flags)) 2941 err = -EBUSY; 2942 else 2943 err = 0; 2944 } else if (cmd_match(buf, "remove")) { 2945 if (rdev->mddev->pers) { 2946 clear_bit(Blocked, &rdev->flags); 2947 remove_and_add_spares(rdev->mddev, rdev); 2948 } 2949 if (rdev->raid_disk >= 0) 2950 err = -EBUSY; 2951 else { 2952 err = 0; 2953 if (mddev_is_clustered(mddev)) 2954 err = md_cluster_ops->remove_disk(mddev, rdev); 2955 2956 if (err == 0) { 2957 md_kick_rdev_from_array(rdev); 2958 if (mddev->pers) { 2959 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2960 md_wakeup_thread(mddev->thread); 2961 } 2962 md_new_event(); 2963 } 2964 } 2965 } else if (cmd_match(buf, "writemostly")) { 2966 set_bit(WriteMostly, &rdev->flags); 2967 
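/* A write-mostly device may need per-rdev serialization resources for
 * write-behind, so make sure the serial pool exists before the
 * superblock update below.
 */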
mddev_create_serial_pool(rdev->mddev, rdev, false); 2968 need_update_sb = true; 2969 err = 0; 2970 } else if (cmd_match(buf, "-writemostly")) { 2971 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 2972 clear_bit(WriteMostly, &rdev->flags); 2973 need_update_sb = true; 2974 err = 0; 2975 } else if (cmd_match(buf, "blocked")) { 2976 set_bit(Blocked, &rdev->flags); 2977 err = 0; 2978 } else if (cmd_match(buf, "-blocked")) { 2979 if (!test_bit(Faulty, &rdev->flags) && 2980 !test_bit(ExternalBbl, &rdev->flags) && 2981 rdev->badblocks.unacked_exist) { 2982 /* metadata handler doesn't understand badblocks, 2983 * so we need to fail the device 2984 */ 2985 md_error(rdev->mddev, rdev); 2986 } 2987 clear_bit(Blocked, &rdev->flags); 2988 clear_bit(BlockedBadBlocks, &rdev->flags); 2989 wake_up(&rdev->blocked_wait); 2990 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2991 md_wakeup_thread(rdev->mddev->thread); 2992 2993 err = 0; 2994 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2995 set_bit(In_sync, &rdev->flags); 2996 err = 0; 2997 } else if (cmd_match(buf, "failfast")) { 2998 set_bit(FailFast, &rdev->flags); 2999 need_update_sb = true; 3000 err = 0; 3001 } else if (cmd_match(buf, "-failfast")) { 3002 clear_bit(FailFast, &rdev->flags); 3003 need_update_sb = true; 3004 err = 0; 3005 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 3006 !test_bit(Journal, &rdev->flags)) { 3007 if (rdev->mddev->pers == NULL) { 3008 clear_bit(In_sync, &rdev->flags); 3009 rdev->saved_raid_disk = rdev->raid_disk; 3010 rdev->raid_disk = -1; 3011 err = 0; 3012 } 3013 } else if (cmd_match(buf, "write_error")) { 3014 set_bit(WriteErrorSeen, &rdev->flags); 3015 err = 0; 3016 } else if (cmd_match(buf, "-write_error")) { 3017 clear_bit(WriteErrorSeen, &rdev->flags); 3018 err = 0; 3019 } else if (cmd_match(buf, "want_replacement")) { 3020 /* Any non-spare device that is not a replacement can 3021 * become want_replacement at any time, but we then need to 3022 * check if recovery is needed. 3023 */ 3024 if (rdev->raid_disk >= 0 && 3025 !test_bit(Journal, &rdev->flags) && 3026 !test_bit(Replacement, &rdev->flags)) 3027 set_bit(WantReplacement, &rdev->flags); 3028 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3029 md_wakeup_thread(rdev->mddev->thread); 3030 err = 0; 3031 } else if (cmd_match(buf, "-want_replacement")) { 3032 /* Clearing 'want_replacement' is always allowed. 3033 * Once replacements starts it is too late though. 3034 */ 3035 err = 0; 3036 clear_bit(WantReplacement, &rdev->flags); 3037 } else if (cmd_match(buf, "replacement")) { 3038 /* Can only set a device as a replacement when array has not 3039 * yet been started. Once running, replacement is automatic 3040 * from spares, or by assigning 'slot'. 3041 */ 3042 if (rdev->mddev->pers) 3043 err = -EBUSY; 3044 else { 3045 set_bit(Replacement, &rdev->flags); 3046 err = 0; 3047 } 3048 } else if (cmd_match(buf, "-replacement")) { 3049 /* Similarly, can only clear Replacement before start */ 3050 if (rdev->mddev->pers) 3051 err = -EBUSY; 3052 else { 3053 clear_bit(Replacement, &rdev->flags); 3054 err = 0; 3055 } 3056 } else if (cmd_match(buf, "re-add")) { 3057 if (!rdev->mddev->pers) 3058 err = -EINVAL; 3059 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 3060 rdev->saved_raid_disk >= 0) { 3061 /* clear_bit is performed _after_ all the devices 3062 * have their local Faulty bit cleared. 
If any writes 3063 * happen in the meantime in the local node, they 3064 * will land in the local bitmap, which will be synced 3065 * by this node eventually 3066 */ 3067 if (!mddev_is_clustered(rdev->mddev) || 3068 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 3069 clear_bit(Faulty, &rdev->flags); 3070 err = add_bound_rdev(rdev); 3071 } 3072 } else 3073 err = -EBUSY; 3074 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 3075 set_bit(ExternalBbl, &rdev->flags); 3076 rdev->badblocks.shift = 0; 3077 err = 0; 3078 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 3079 clear_bit(ExternalBbl, &rdev->flags); 3080 err = 0; 3081 } 3082 if (need_update_sb) 3083 md_update_sb(mddev, 1); 3084 if (!err) 3085 sysfs_notify_dirent_safe(rdev->sysfs_state); 3086 return err ? err : len; 3087 } 3088 static struct rdev_sysfs_entry rdev_state = 3089 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 3090 3091 static ssize_t 3092 errors_show(struct md_rdev *rdev, char *page) 3093 { 3094 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 3095 } 3096 3097 static ssize_t 3098 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 3099 { 3100 unsigned int n; 3101 int rv; 3102 3103 rv = kstrtouint(buf, 10, &n); 3104 if (rv < 0) 3105 return rv; 3106 atomic_set(&rdev->corrected_errors, n); 3107 return len; 3108 } 3109 static struct rdev_sysfs_entry rdev_errors = 3110 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 3111 3112 static ssize_t 3113 slot_show(struct md_rdev *rdev, char *page) 3114 { 3115 if (test_bit(Journal, &rdev->flags)) 3116 return sprintf(page, "journal\n"); 3117 else if (rdev->raid_disk < 0) 3118 return sprintf(page, "none\n"); 3119 else 3120 return sprintf(page, "%d\n", rdev->raid_disk); 3121 } 3122 3123 static ssize_t 3124 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 3125 { 3126 int slot; 3127 int err; 3128 3129 if (test_bit(Journal, &rdev->flags)) 3130 return -EBUSY; 3131 if (strncmp(buf, "none", 4)==0) 3132 slot = -1; 3133 else { 3134 err = kstrtouint(buf, 10, (unsigned int *)&slot); 3135 if (err < 0) 3136 return err; 3137 } 3138 if (rdev->mddev->pers && slot == -1) { 3139 /* Setting 'slot' on an active array requires also 3140 * updating the 'rd%d' link, and communicating 3141 * with the personality with ->hot_*_disk. 3142 * For now we only support removing 3143 * failed/spare devices. This normally happens automatically, 3144 * but not when the metadata is externally managed. 3145 */ 3146 if (rdev->raid_disk == -1) 3147 return -EEXIST; 3148 /* personality does all needed checks */ 3149 if (rdev->mddev->pers->hot_remove_disk == NULL) 3150 return -EINVAL; 3151 clear_bit(Blocked, &rdev->flags); 3152 remove_and_add_spares(rdev->mddev, rdev); 3153 if (rdev->raid_disk >= 0) 3154 return -EBUSY; 3155 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3156 md_wakeup_thread(rdev->mddev->thread); 3157 } else if (rdev->mddev->pers) { 3158 /* Activating a spare .. or possibly reactivating 3159 * if we ever get bitmaps working here. 
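 * The device must not already hold a slot, recovery must not be
 * running, and the requested slot has to fit within raid_disks
 * (allowing for delta_disks during a reshape); the device is then
 * handed to ->hot_add_disk().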
3160 */ 3161 int err; 3162 3163 if (rdev->raid_disk != -1) 3164 return -EBUSY; 3165 3166 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 3167 return -EBUSY; 3168 3169 if (rdev->mddev->pers->hot_add_disk == NULL) 3170 return -EINVAL; 3171 3172 if (slot >= rdev->mddev->raid_disks && 3173 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3174 return -ENOSPC; 3175 3176 rdev->raid_disk = slot; 3177 if (test_bit(In_sync, &rdev->flags)) 3178 rdev->saved_raid_disk = slot; 3179 else 3180 rdev->saved_raid_disk = -1; 3181 clear_bit(In_sync, &rdev->flags); 3182 clear_bit(Bitmap_sync, &rdev->flags); 3183 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); 3184 if (err) { 3185 rdev->raid_disk = -1; 3186 return err; 3187 } else 3188 sysfs_notify_dirent_safe(rdev->sysfs_state); 3189 /* failure here is OK */; 3190 sysfs_link_rdev(rdev->mddev, rdev); 3191 /* don't wakeup anyone, leave that to userspace. */ 3192 } else { 3193 if (slot >= rdev->mddev->raid_disks && 3194 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3195 return -ENOSPC; 3196 rdev->raid_disk = slot; 3197 /* assume it is working */ 3198 clear_bit(Faulty, &rdev->flags); 3199 clear_bit(WriteMostly, &rdev->flags); 3200 set_bit(In_sync, &rdev->flags); 3201 sysfs_notify_dirent_safe(rdev->sysfs_state); 3202 } 3203 return len; 3204 } 3205 3206 static struct rdev_sysfs_entry rdev_slot = 3207 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3208 3209 static ssize_t 3210 offset_show(struct md_rdev *rdev, char *page) 3211 { 3212 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3213 } 3214 3215 static ssize_t 3216 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3217 { 3218 unsigned long long offset; 3219 if (kstrtoull(buf, 10, &offset) < 0) 3220 return -EINVAL; 3221 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3222 return -EBUSY; 3223 if (rdev->sectors && rdev->mddev->external) 3224 /* Must set offset before size, so overlap checks 3225 * can be sane */ 3226 return -EBUSY; 3227 rdev->data_offset = offset; 3228 rdev->new_data_offset = offset; 3229 return len; 3230 } 3231 3232 static struct rdev_sysfs_entry rdev_offset = 3233 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3234 3235 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3236 { 3237 return sprintf(page, "%llu\n", 3238 (unsigned long long)rdev->new_data_offset); 3239 } 3240 3241 static ssize_t new_offset_store(struct md_rdev *rdev, 3242 const char *buf, size_t len) 3243 { 3244 unsigned long long new_offset; 3245 struct mddev *mddev = rdev->mddev; 3246 3247 if (kstrtoull(buf, 10, &new_offset) < 0) 3248 return -EINVAL; 3249 3250 if (mddev->sync_thread || 3251 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 3252 return -EBUSY; 3253 if (new_offset == rdev->data_offset) 3254 /* reset is always permitted */ 3255 ; 3256 else if (new_offset > rdev->data_offset) { 3257 /* must not push array size beyond rdev_sectors */ 3258 if (new_offset - rdev->data_offset 3259 + mddev->dev_sectors > rdev->sectors) 3260 return -E2BIG; 3261 } 3262 /* Metadata worries about other space details. */ 3263 3264 /* decreasing the offset is inconsistent with a backwards 3265 * reshape. 3266 */ 3267 if (new_offset < rdev->data_offset && 3268 mddev->reshape_backwards) 3269 return -EINVAL; 3270 /* Increasing offset is inconsistent with forwards 3271 * reshape. reshape_direction should be set to 3272 * 'backwards' first. 
3273 */ 3274 if (new_offset > rdev->data_offset && 3275 !mddev->reshape_backwards) 3276 return -EINVAL; 3277 3278 if (mddev->pers && mddev->persistent && 3279 !super_types[mddev->major_version] 3280 .allow_new_offset(rdev, new_offset)) 3281 return -E2BIG; 3282 rdev->new_data_offset = new_offset; 3283 if (new_offset > rdev->data_offset) 3284 mddev->reshape_backwards = 1; 3285 else if (new_offset < rdev->data_offset) 3286 mddev->reshape_backwards = 0; 3287 3288 return len; 3289 } 3290 static struct rdev_sysfs_entry rdev_new_offset = 3291 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 3292 3293 static ssize_t 3294 rdev_size_show(struct md_rdev *rdev, char *page) 3295 { 3296 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 3297 } 3298 3299 static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b) 3300 { 3301 /* check if two start/length pairs overlap */ 3302 if (a->data_offset + a->sectors <= b->data_offset) 3303 return false; 3304 if (b->data_offset + b->sectors <= a->data_offset) 3305 return false; 3306 return true; 3307 } 3308 3309 static bool md_rdev_overlaps(struct md_rdev *rdev) 3310 { 3311 struct mddev *mddev; 3312 struct md_rdev *rdev2; 3313 3314 spin_lock(&all_mddevs_lock); 3315 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { 3316 if (test_bit(MD_DELETED, &mddev->flags)) 3317 continue; 3318 rdev_for_each(rdev2, mddev) { 3319 if (rdev != rdev2 && rdev->bdev == rdev2->bdev && 3320 md_rdevs_overlap(rdev, rdev2)) { 3321 spin_unlock(&all_mddevs_lock); 3322 return true; 3323 } 3324 } 3325 } 3326 spin_unlock(&all_mddevs_lock); 3327 return false; 3328 } 3329 3330 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 3331 { 3332 unsigned long long blocks; 3333 sector_t new; 3334 3335 if (kstrtoull(buf, 10, &blocks) < 0) 3336 return -EINVAL; 3337 3338 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 3339 return -EINVAL; /* sector conversion overflow */ 3340 3341 new = blocks * 2; 3342 if (new != blocks * 2) 3343 return -EINVAL; /* unsigned long long to sector_t overflow */ 3344 3345 *sectors = new; 3346 return 0; 3347 } 3348 3349 static ssize_t 3350 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3351 { 3352 struct mddev *my_mddev = rdev->mddev; 3353 sector_t oldsectors = rdev->sectors; 3354 sector_t sectors; 3355 3356 if (test_bit(Journal, &rdev->flags)) 3357 return -EBUSY; 3358 if (strict_blocks_to_sectors(buf, §ors) < 0) 3359 return -EINVAL; 3360 if (rdev->data_offset != rdev->new_data_offset) 3361 return -EINVAL; /* too confusing */ 3362 if (my_mddev->pers && rdev->raid_disk >= 0) { 3363 if (my_mddev->persistent) { 3364 sectors = super_types[my_mddev->major_version]. 3365 rdev_size_change(rdev, sectors); 3366 if (!sectors) 3367 return -EBUSY; 3368 } else if (!sectors) 3369 sectors = bdev_nr_sectors(rdev->bdev) - 3370 rdev->data_offset; 3371 if (!my_mddev->pers->resize) 3372 /* Cannot change size for RAID0 or Linear etc */ 3373 return -EINVAL; 3374 } 3375 if (sectors < my_mddev->dev_sectors) 3376 return -EINVAL; /* component must fit device */ 3377 3378 rdev->sectors = sectors; 3379 3380 /* 3381 * Check that all other rdevs with the same bdev do not overlap. This 3382 * check does not provide a hard guarantee, it just helps avoid 3383 * dangerous mistakes. 3384 */ 3385 if (sectors > oldsectors && my_mddev->external && 3386 md_rdev_overlaps(rdev)) { 3387 /* 3388 * Someone else could have slipped in a size change here, but 3389 * doing so is just silly. 
We put oldsectors back because we 3390 * know it is safe, and trust userspace not to race with itself. 3391 */ 3392 rdev->sectors = oldsectors; 3393 return -EBUSY; 3394 } 3395 return len; 3396 } 3397 3398 static struct rdev_sysfs_entry rdev_size = 3399 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 3400 3401 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 3402 { 3403 unsigned long long recovery_start = rdev->recovery_offset; 3404 3405 if (test_bit(In_sync, &rdev->flags) || 3406 recovery_start == MaxSector) 3407 return sprintf(page, "none\n"); 3408 3409 return sprintf(page, "%llu\n", recovery_start); 3410 } 3411 3412 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 3413 { 3414 unsigned long long recovery_start; 3415 3416 if (cmd_match(buf, "none")) 3417 recovery_start = MaxSector; 3418 else if (kstrtoull(buf, 10, &recovery_start)) 3419 return -EINVAL; 3420 3421 if (rdev->mddev->pers && 3422 rdev->raid_disk >= 0) 3423 return -EBUSY; 3424 3425 rdev->recovery_offset = recovery_start; 3426 if (recovery_start == MaxSector) 3427 set_bit(In_sync, &rdev->flags); 3428 else 3429 clear_bit(In_sync, &rdev->flags); 3430 return len; 3431 } 3432 3433 static struct rdev_sysfs_entry rdev_recovery_start = 3434 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 3435 3436 /* sysfs access to bad-blocks list. 3437 * We present two files. 3438 * 'bad-blocks' lists sector numbers and lengths of ranges that 3439 * are recorded as bad. The list is truncated to fit within 3440 * the one-page limit of sysfs. 3441 * Writing "sector length" to this file adds an acknowledged 3442 * bad block list. 3443 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet 3444 * been acknowledged. Writing to this file adds bad blocks 3445 * without acknowledging them. This is largely for testing. 
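 * For example (illustrative device name and values), recording an
 * acknowledged 8-sector bad range on a member device:
 *   echo "4096 8" > /sys/block/md0/md/dev-sda/bad_blocks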
3446 */ 3447 static ssize_t bb_show(struct md_rdev *rdev, char *page) 3448 { 3449 return badblocks_show(&rdev->badblocks, page, 0); 3450 } 3451 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 3452 { 3453 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 3454 /* Maybe that ack was all we needed */ 3455 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 3456 wake_up(&rdev->blocked_wait); 3457 return rv; 3458 } 3459 static struct rdev_sysfs_entry rdev_bad_blocks = 3460 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 3461 3462 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 3463 { 3464 return badblocks_show(&rdev->badblocks, page, 1); 3465 } 3466 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 3467 { 3468 return badblocks_store(&rdev->badblocks, page, len, 1); 3469 } 3470 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 3471 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 3472 3473 static ssize_t 3474 ppl_sector_show(struct md_rdev *rdev, char *page) 3475 { 3476 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector); 3477 } 3478 3479 static ssize_t 3480 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len) 3481 { 3482 unsigned long long sector; 3483 3484 if (kstrtoull(buf, 10, §or) < 0) 3485 return -EINVAL; 3486 if (sector != (sector_t)sector) 3487 return -EINVAL; 3488 3489 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3490 rdev->raid_disk >= 0) 3491 return -EBUSY; 3492 3493 if (rdev->mddev->persistent) { 3494 if (rdev->mddev->major_version == 0) 3495 return -EINVAL; 3496 if ((sector > rdev->sb_start && 3497 sector - rdev->sb_start > S16_MAX) || 3498 (sector < rdev->sb_start && 3499 rdev->sb_start - sector > -S16_MIN)) 3500 return -EINVAL; 3501 rdev->ppl.offset = sector - rdev->sb_start; 3502 } else if (!rdev->mddev->external) { 3503 return -EBUSY; 3504 } 3505 rdev->ppl.sector = sector; 3506 return len; 3507 } 3508 3509 static struct rdev_sysfs_entry rdev_ppl_sector = 3510 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store); 3511 3512 static ssize_t 3513 ppl_size_show(struct md_rdev *rdev, char *page) 3514 { 3515 return sprintf(page, "%u\n", rdev->ppl.size); 3516 } 3517 3518 static ssize_t 3519 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3520 { 3521 unsigned int size; 3522 3523 if (kstrtouint(buf, 10, &size) < 0) 3524 return -EINVAL; 3525 3526 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3527 rdev->raid_disk >= 0) 3528 return -EBUSY; 3529 3530 if (rdev->mddev->persistent) { 3531 if (rdev->mddev->major_version == 0) 3532 return -EINVAL; 3533 if (size > U16_MAX) 3534 return -EINVAL; 3535 } else if (!rdev->mddev->external) { 3536 return -EBUSY; 3537 } 3538 rdev->ppl.size = size; 3539 return len; 3540 } 3541 3542 static struct rdev_sysfs_entry rdev_ppl_size = 3543 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store); 3544 3545 static struct attribute *rdev_default_attrs[] = { 3546 &rdev_state.attr, 3547 &rdev_errors.attr, 3548 &rdev_slot.attr, 3549 &rdev_offset.attr, 3550 &rdev_new_offset.attr, 3551 &rdev_size.attr, 3552 &rdev_recovery_start.attr, 3553 &rdev_bad_blocks.attr, 3554 &rdev_unack_bad_blocks.attr, 3555 &rdev_ppl_sector.attr, 3556 &rdev_ppl_size.attr, 3557 NULL, 3558 }; 3559 ATTRIBUTE_GROUPS(rdev_default); 3560 static ssize_t 3561 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3562 { 3563 struct rdev_sysfs_entry 
*entry = container_of(attr, struct rdev_sysfs_entry, attr); 3564 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3565 3566 if (!entry->show) 3567 return -EIO; 3568 if (!rdev->mddev) 3569 return -ENODEV; 3570 return entry->show(rdev, page); 3571 } 3572 3573 static ssize_t 3574 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3575 const char *page, size_t length) 3576 { 3577 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3578 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3579 ssize_t rv; 3580 struct mddev *mddev = rdev->mddev; 3581 3582 if (!entry->store) 3583 return -EIO; 3584 if (!capable(CAP_SYS_ADMIN)) 3585 return -EACCES; 3586 rv = mddev ? mddev_lock(mddev) : -ENODEV; 3587 if (!rv) { 3588 if (rdev->mddev == NULL) 3589 rv = -ENODEV; 3590 else 3591 rv = entry->store(rdev, page, length); 3592 mddev_unlock(mddev); 3593 } 3594 return rv; 3595 } 3596 3597 static void rdev_free(struct kobject *ko) 3598 { 3599 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3600 kfree(rdev); 3601 } 3602 static const struct sysfs_ops rdev_sysfs_ops = { 3603 .show = rdev_attr_show, 3604 .store = rdev_attr_store, 3605 }; 3606 static struct kobj_type rdev_ktype = { 3607 .release = rdev_free, 3608 .sysfs_ops = &rdev_sysfs_ops, 3609 .default_groups = rdev_default_groups, 3610 }; 3611 3612 int md_rdev_init(struct md_rdev *rdev) 3613 { 3614 rdev->desc_nr = -1; 3615 rdev->saved_raid_disk = -1; 3616 rdev->raid_disk = -1; 3617 rdev->flags = 0; 3618 rdev->data_offset = 0; 3619 rdev->new_data_offset = 0; 3620 rdev->sb_events = 0; 3621 rdev->last_read_error = 0; 3622 rdev->sb_loaded = 0; 3623 rdev->bb_page = NULL; 3624 atomic_set(&rdev->nr_pending, 0); 3625 atomic_set(&rdev->read_errors, 0); 3626 atomic_set(&rdev->corrected_errors, 0); 3627 3628 INIT_LIST_HEAD(&rdev->same_set); 3629 init_waitqueue_head(&rdev->blocked_wait); 3630 3631 /* Add space to store bad block list. 3632 * This reserves the space even on arrays where it cannot 3633 * be used - I wonder if that matters 3634 */ 3635 return badblocks_init(&rdev->badblocks, 0); 3636 } 3637 EXPORT_SYMBOL_GPL(md_rdev_init); 3638 /* 3639 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3640 * 3641 * mark the device faulty if: 3642 * 3643 * - the device is nonexistent (zero size) 3644 * - the device has no valid superblock 3645 * 3646 * a faulty rdev _never_ has rdev->sb set. 3647 */ 3648 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3649 { 3650 static struct md_rdev *claim_rdev; /* just for claiming the bdev */ 3651 struct md_rdev *rdev; 3652 sector_t size; 3653 int err; 3654 3655 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3656 if (!rdev) 3657 return ERR_PTR(-ENOMEM); 3658 3659 err = md_rdev_init(rdev); 3660 if (err) 3661 goto out_free_rdev; 3662 err = alloc_disk_sb(rdev); 3663 if (err) 3664 goto out_clear_rdev; 3665 3666 rdev->bdev = blkdev_get_by_dev(newdev, 3667 FMODE_READ | FMODE_WRITE | FMODE_EXCL, 3668 super_format == -2 ? 
claim_rdev : rdev); 3669 if (IS_ERR(rdev->bdev)) { 3670 pr_warn("md: could not open device unknown-block(%u,%u).\n", 3671 MAJOR(newdev), MINOR(newdev)); 3672 err = PTR_ERR(rdev->bdev); 3673 goto out_clear_rdev; 3674 } 3675 3676 kobject_init(&rdev->kobj, &rdev_ktype); 3677 3678 size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS; 3679 if (!size) { 3680 pr_warn("md: %pg has zero or unknown size, marking faulty!\n", 3681 rdev->bdev); 3682 err = -EINVAL; 3683 goto out_blkdev_put; 3684 } 3685 3686 if (super_format >= 0) { 3687 err = super_types[super_format]. 3688 load_super(rdev, NULL, super_minor); 3689 if (err == -EINVAL) { 3690 pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n", 3691 rdev->bdev, 3692 super_format, super_minor); 3693 goto out_blkdev_put; 3694 } 3695 if (err < 0) { 3696 pr_warn("md: could not read %pg's sb, not importing!\n", 3697 rdev->bdev); 3698 goto out_blkdev_put; 3699 } 3700 } 3701 3702 return rdev; 3703 3704 out_blkdev_put: 3705 blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 3706 out_clear_rdev: 3707 md_rdev_clear(rdev); 3708 out_free_rdev: 3709 kfree(rdev); 3710 return ERR_PTR(err); 3711 } 3712 3713 /* 3714 * Check a full RAID array for plausibility 3715 */ 3716 3717 static int analyze_sbs(struct mddev *mddev) 3718 { 3719 int i; 3720 struct md_rdev *rdev, *freshest, *tmp; 3721 3722 freshest = NULL; 3723 rdev_for_each_safe(rdev, tmp, mddev) 3724 switch (super_types[mddev->major_version]. 3725 load_super(rdev, freshest, mddev->minor_version)) { 3726 case 1: 3727 freshest = rdev; 3728 break; 3729 case 0: 3730 break; 3731 default: 3732 pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n", 3733 rdev->bdev); 3734 md_kick_rdev_from_array(rdev); 3735 } 3736 3737 /* Cannot find a valid fresh disk */ 3738 if (!freshest) { 3739 pr_warn("md: cannot find a valid disk\n"); 3740 return -EINVAL; 3741 } 3742 3743 super_types[mddev->major_version]. 3744 validate_super(mddev, freshest); 3745 3746 i = 0; 3747 rdev_for_each_safe(rdev, tmp, mddev) { 3748 if (mddev->max_disks && 3749 (rdev->desc_nr >= mddev->max_disks || 3750 i > mddev->max_disks)) { 3751 pr_warn("md: %s: %pg: only %d devices permitted\n", 3752 mdname(mddev), rdev->bdev, 3753 mddev->max_disks); 3754 md_kick_rdev_from_array(rdev); 3755 continue; 3756 } 3757 if (rdev != freshest) { 3758 if (super_types[mddev->major_version]. 3759 validate_super(mddev, rdev)) { 3760 pr_warn("md: kicking non-fresh %pg from array!\n", 3761 rdev->bdev); 3762 md_kick_rdev_from_array(rdev); 3763 continue; 3764 } 3765 } 3766 if (mddev->level == LEVEL_MULTIPATH) { 3767 rdev->desc_nr = i++; 3768 rdev->raid_disk = rdev->desc_nr; 3769 set_bit(In_sync, &rdev->flags); 3770 } else if (rdev->raid_disk >= 3771 (mddev->raid_disks - min(0, mddev->delta_disks)) && 3772 !test_bit(Journal, &rdev->flags)) { 3773 rdev->raid_disk = -1; 3774 clear_bit(In_sync, &rdev->flags); 3775 } 3776 } 3777 3778 return 0; 3779 } 3780 3781 /* Read a fixed-point number. 3782 * Numbers in sysfs attributes should be in "standard" units where 3783 * possible, so time should be in seconds. 3784 * However we internally use a a much smaller unit such as 3785 * milliseconds or jiffies. 3786 * This function takes a decimal number with a possible fractional 3787 * component, and produces an integer which is the result of 3788 * multiplying that number by 10^'scale'. 3789 * all without any floating-point arithmetic. 
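 * e.g. strict_strtoul_scaled("0.2", &res, 3) stores 200 in *res
 * (0.2 seconds expressed in milliseconds), which is how
 * safe_delay_store() below parses safe_mode_delay.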
3790 */ 3791 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 3792 { 3793 unsigned long result = 0; 3794 long decimals = -1; 3795 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) { 3796 if (*cp == '.') 3797 decimals = 0; 3798 else if (decimals < scale) { 3799 unsigned int value; 3800 value = *cp - '0'; 3801 result = result * 10 + value; 3802 if (decimals >= 0) 3803 decimals++; 3804 } 3805 cp++; 3806 } 3807 if (*cp == '\n') 3808 cp++; 3809 if (*cp) 3810 return -EINVAL; 3811 if (decimals < 0) 3812 decimals = 0; 3813 *res = result * int_pow(10, scale - decimals); 3814 return 0; 3815 } 3816 3817 static ssize_t 3818 safe_delay_show(struct mddev *mddev, char *page) 3819 { 3820 int msec = (mddev->safemode_delay*1000)/HZ; 3821 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3822 } 3823 static ssize_t 3824 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3825 { 3826 unsigned long msec; 3827 3828 if (mddev_is_clustered(mddev)) { 3829 pr_warn("md: Safemode is disabled for clustered mode\n"); 3830 return -EINVAL; 3831 } 3832 3833 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3834 return -EINVAL; 3835 if (msec == 0) 3836 mddev->safemode_delay = 0; 3837 else { 3838 unsigned long old_delay = mddev->safemode_delay; 3839 unsigned long new_delay = (msec*HZ)/1000; 3840 3841 if (new_delay == 0) 3842 new_delay = 1; 3843 mddev->safemode_delay = new_delay; 3844 if (new_delay < old_delay || old_delay == 0) 3845 mod_timer(&mddev->safemode_timer, jiffies+1); 3846 } 3847 return len; 3848 } 3849 static struct md_sysfs_entry md_safe_delay = 3850 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3851 3852 static ssize_t 3853 level_show(struct mddev *mddev, char *page) 3854 { 3855 struct md_personality *p; 3856 int ret; 3857 spin_lock(&mddev->lock); 3858 p = mddev->pers; 3859 if (p) 3860 ret = sprintf(page, "%s\n", p->name); 3861 else if (mddev->clevel[0]) 3862 ret = sprintf(page, "%s\n", mddev->clevel); 3863 else if (mddev->level != LEVEL_NONE) 3864 ret = sprintf(page, "%d\n", mddev->level); 3865 else 3866 ret = 0; 3867 spin_unlock(&mddev->lock); 3868 return ret; 3869 } 3870 3871 static ssize_t 3872 level_store(struct mddev *mddev, const char *buf, size_t len) 3873 { 3874 char clevel[16]; 3875 ssize_t rv; 3876 size_t slen = len; 3877 struct md_personality *pers, *oldpers; 3878 long level; 3879 void *priv, *oldpriv; 3880 struct md_rdev *rdev; 3881 3882 if (slen == 0 || slen >= sizeof(clevel)) 3883 return -EINVAL; 3884 3885 rv = mddev_lock(mddev); 3886 if (rv) 3887 return rv; 3888 3889 if (mddev->pers == NULL) { 3890 strncpy(mddev->clevel, buf, slen); 3891 if (mddev->clevel[slen-1] == '\n') 3892 slen--; 3893 mddev->clevel[slen] = 0; 3894 mddev->level = LEVEL_NONE; 3895 rv = len; 3896 goto out_unlock; 3897 } 3898 rv = -EROFS; 3899 if (!md_is_rdwr(mddev)) 3900 goto out_unlock; 3901 3902 /* request to change the personality. Need to ensure: 3903 * - array is not engaged in resync/recovery/reshape 3904 * - old personality can be suspended 3905 * - new personality will access other array. 
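* A typical request of this kind is, for example, writing "raid6" to /sys/block/mdX/md/level on a running raid5 array, asking the raid6 personality to take over the existing layout.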
3906 */ 3907 3908 rv = -EBUSY; 3909 if (mddev->sync_thread || 3910 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3911 mddev->reshape_position != MaxSector || 3912 mddev->sysfs_active) 3913 goto out_unlock; 3914 3915 rv = -EINVAL; 3916 if (!mddev->pers->quiesce) { 3917 pr_warn("md: %s: %s does not support online personality change\n", 3918 mdname(mddev), mddev->pers->name); 3919 goto out_unlock; 3920 } 3921 3922 /* Now find the new personality */ 3923 strncpy(clevel, buf, slen); 3924 if (clevel[slen-1] == '\n') 3925 slen--; 3926 clevel[slen] = 0; 3927 if (kstrtol(clevel, 10, &level)) 3928 level = LEVEL_NONE; 3929 3930 if (request_module("md-%s", clevel) != 0) 3931 request_module("md-level-%s", clevel); 3932 spin_lock(&pers_lock); 3933 pers = find_pers(level, clevel); 3934 if (!pers || !try_module_get(pers->owner)) { 3935 spin_unlock(&pers_lock); 3936 pr_warn("md: personality %s not loaded\n", clevel); 3937 rv = -EINVAL; 3938 goto out_unlock; 3939 } 3940 spin_unlock(&pers_lock); 3941 3942 if (pers == mddev->pers) { 3943 /* Nothing to do! */ 3944 module_put(pers->owner); 3945 rv = len; 3946 goto out_unlock; 3947 } 3948 if (!pers->takeover) { 3949 module_put(pers->owner); 3950 pr_warn("md: %s: %s does not support personality takeover\n", 3951 mdname(mddev), clevel); 3952 rv = -EINVAL; 3953 goto out_unlock; 3954 } 3955 3956 rdev_for_each(rdev, mddev) 3957 rdev->new_raid_disk = rdev->raid_disk; 3958 3959 /* ->takeover must set new_* and/or delta_disks 3960 * if it succeeds, and may set them when it fails. 3961 */ 3962 priv = pers->takeover(mddev); 3963 if (IS_ERR(priv)) { 3964 mddev->new_level = mddev->level; 3965 mddev->new_layout = mddev->layout; 3966 mddev->new_chunk_sectors = mddev->chunk_sectors; 3967 mddev->raid_disks -= mddev->delta_disks; 3968 mddev->delta_disks = 0; 3969 mddev->reshape_backwards = 0; 3970 module_put(pers->owner); 3971 pr_warn("md: %s: %s would not accept array\n", 3972 mdname(mddev), clevel); 3973 rv = PTR_ERR(priv); 3974 goto out_unlock; 3975 } 3976 3977 /* Looks like we have a winner */ 3978 mddev_suspend(mddev); 3979 mddev_detach(mddev); 3980 3981 spin_lock(&mddev->lock); 3982 oldpers = mddev->pers; 3983 oldpriv = mddev->private; 3984 mddev->pers = pers; 3985 mddev->private = priv; 3986 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3987 mddev->level = mddev->new_level; 3988 mddev->layout = mddev->new_layout; 3989 mddev->chunk_sectors = mddev->new_chunk_sectors; 3990 mddev->delta_disks = 0; 3991 mddev->reshape_backwards = 0; 3992 mddev->degraded = 0; 3993 spin_unlock(&mddev->lock); 3994 3995 if (oldpers->sync_request == NULL && 3996 mddev->external) { 3997 /* We are converting from a no-redundancy array 3998 * to a redundancy array and metadata is managed 3999 * externally so we need to be sure that writes 4000 * won't block due to a need to transition 4001 * clean->dirty 4002 * until external management is started. 
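* (One common case is a raid0 array with externally managed metadata, e.g. under mdmon, being taken over by raid5; raid0 has no sync_request, so oldpers->sync_request is NULL here.)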
4003 */ 4004 mddev->in_sync = 0; 4005 mddev->safemode_delay = 0; 4006 mddev->safemode = 0; 4007 } 4008 4009 oldpers->free(mddev, oldpriv); 4010 4011 if (oldpers->sync_request == NULL && 4012 pers->sync_request != NULL) { 4013 /* need to add the md_redundancy_group */ 4014 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4015 pr_warn("md: cannot register extra attributes for %s\n", 4016 mdname(mddev)); 4017 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 4018 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 4019 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 4020 } 4021 if (oldpers->sync_request != NULL && 4022 pers->sync_request == NULL) { 4023 /* need to remove the md_redundancy_group */ 4024 if (mddev->to_remove == NULL) 4025 mddev->to_remove = &md_redundancy_group; 4026 } 4027 4028 module_put(oldpers->owner); 4029 4030 rdev_for_each(rdev, mddev) { 4031 if (rdev->raid_disk < 0) 4032 continue; 4033 if (rdev->new_raid_disk >= mddev->raid_disks) 4034 rdev->new_raid_disk = -1; 4035 if (rdev->new_raid_disk == rdev->raid_disk) 4036 continue; 4037 sysfs_unlink_rdev(mddev, rdev); 4038 } 4039 rdev_for_each(rdev, mddev) { 4040 if (rdev->raid_disk < 0) 4041 continue; 4042 if (rdev->new_raid_disk == rdev->raid_disk) 4043 continue; 4044 rdev->raid_disk = rdev->new_raid_disk; 4045 if (rdev->raid_disk < 0) 4046 clear_bit(In_sync, &rdev->flags); 4047 else { 4048 if (sysfs_link_rdev(mddev, rdev)) 4049 pr_warn("md: cannot register rd%d for %s after level change\n", 4050 rdev->raid_disk, mdname(mddev)); 4051 } 4052 } 4053 4054 if (pers->sync_request == NULL) { 4055 /* this is now an array without redundancy, so 4056 * it must always be in_sync 4057 */ 4058 mddev->in_sync = 1; 4059 del_timer_sync(&mddev->safemode_timer); 4060 } 4061 blk_set_stacking_limits(&mddev->queue->limits); 4062 pers->run(mddev); 4063 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4064 mddev_resume(mddev); 4065 if (!mddev->thread) 4066 md_update_sb(mddev, 1); 4067 sysfs_notify_dirent_safe(mddev->sysfs_level); 4068 md_new_event(); 4069 rv = len; 4070 out_unlock: 4071 mddev_unlock(mddev); 4072 return rv; 4073 } 4074 4075 static struct md_sysfs_entry md_level = 4076 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 4077 4078 static ssize_t 4079 layout_show(struct mddev *mddev, char *page) 4080 { 4081 /* just a number, not meaningful for all levels */ 4082 if (mddev->reshape_position != MaxSector && 4083 mddev->layout != mddev->new_layout) 4084 return sprintf(page, "%d (%d)\n", 4085 mddev->new_layout, mddev->layout); 4086 return sprintf(page, "%d\n", mddev->layout); 4087 } 4088 4089 static ssize_t 4090 layout_store(struct mddev *mddev, const char *buf, size_t len) 4091 { 4092 unsigned int n; 4093 int err; 4094 4095 err = kstrtouint(buf, 10, &n); 4096 if (err < 0) 4097 return err; 4098 err = mddev_lock(mddev); 4099 if (err) 4100 return err; 4101 4102 if (mddev->pers) { 4103 if (mddev->pers->check_reshape == NULL) 4104 err = -EBUSY; 4105 else if (!md_is_rdwr(mddev)) 4106 err = -EROFS; 4107 else { 4108 mddev->new_layout = n; 4109 err = mddev->pers->check_reshape(mddev); 4110 if (err) 4111 mddev->new_layout = mddev->layout; 4112 } 4113 } else { 4114 mddev->new_layout = n; 4115 if (mddev->reshape_position == MaxSector) 4116 mddev->layout = n; 4117 } 4118 mddev_unlock(mddev); 4119 return err ?: len; 4120 } 4121 static struct md_sysfs_entry md_layout = 4122 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 4123 4124 static ssize_t 4125 
raid_disks_show(struct mddev *mddev, char *page) 4126 { 4127 if (mddev->raid_disks == 0) 4128 return 0; 4129 if (mddev->reshape_position != MaxSector && 4130 mddev->delta_disks != 0) 4131 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 4132 mddev->raid_disks - mddev->delta_disks); 4133 return sprintf(page, "%d\n", mddev->raid_disks); 4134 } 4135 4136 static int update_raid_disks(struct mddev *mddev, int raid_disks); 4137 4138 static ssize_t 4139 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 4140 { 4141 unsigned int n; 4142 int err; 4143 4144 err = kstrtouint(buf, 10, &n); 4145 if (err < 0) 4146 return err; 4147 4148 err = mddev_lock(mddev); 4149 if (err) 4150 return err; 4151 if (mddev->pers) 4152 err = update_raid_disks(mddev, n); 4153 else if (mddev->reshape_position != MaxSector) { 4154 struct md_rdev *rdev; 4155 int olddisks = mddev->raid_disks - mddev->delta_disks; 4156 4157 err = -EINVAL; 4158 rdev_for_each(rdev, mddev) { 4159 if (olddisks < n && 4160 rdev->data_offset < rdev->new_data_offset) 4161 goto out_unlock; 4162 if (olddisks > n && 4163 rdev->data_offset > rdev->new_data_offset) 4164 goto out_unlock; 4165 } 4166 err = 0; 4167 mddev->delta_disks = n - olddisks; 4168 mddev->raid_disks = n; 4169 mddev->reshape_backwards = (mddev->delta_disks < 0); 4170 } else 4171 mddev->raid_disks = n; 4172 out_unlock: 4173 mddev_unlock(mddev); 4174 return err ? err : len; 4175 } 4176 static struct md_sysfs_entry md_raid_disks = 4177 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 4178 4179 static ssize_t 4180 uuid_show(struct mddev *mddev, char *page) 4181 { 4182 return sprintf(page, "%pU\n", mddev->uuid); 4183 } 4184 static struct md_sysfs_entry md_uuid = 4185 __ATTR(uuid, S_IRUGO, uuid_show, NULL); 4186 4187 static ssize_t 4188 chunk_size_show(struct mddev *mddev, char *page) 4189 { 4190 if (mddev->reshape_position != MaxSector && 4191 mddev->chunk_sectors != mddev->new_chunk_sectors) 4192 return sprintf(page, "%d (%d)\n", 4193 mddev->new_chunk_sectors << 9, 4194 mddev->chunk_sectors << 9); 4195 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 4196 } 4197 4198 static ssize_t 4199 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 4200 { 4201 unsigned long n; 4202 int err; 4203 4204 err = kstrtoul(buf, 10, &n); 4205 if (err < 0) 4206 return err; 4207 4208 err = mddev_lock(mddev); 4209 if (err) 4210 return err; 4211 if (mddev->pers) { 4212 if (mddev->pers->check_reshape == NULL) 4213 err = -EBUSY; 4214 else if (!md_is_rdwr(mddev)) 4215 err = -EROFS; 4216 else { 4217 mddev->new_chunk_sectors = n >> 9; 4218 err = mddev->pers->check_reshape(mddev); 4219 if (err) 4220 mddev->new_chunk_sectors = mddev->chunk_sectors; 4221 } 4222 } else { 4223 mddev->new_chunk_sectors = n >> 9; 4224 if (mddev->reshape_position == MaxSector) 4225 mddev->chunk_sectors = n >> 9; 4226 } 4227 mddev_unlock(mddev); 4228 return err ?: len; 4229 } 4230 static struct md_sysfs_entry md_chunk_size = 4231 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 4232 4233 static ssize_t 4234 resync_start_show(struct mddev *mddev, char *page) 4235 { 4236 if (mddev->recovery_cp == MaxSector) 4237 return sprintf(page, "none\n"); 4238 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 4239 } 4240 4241 static ssize_t 4242 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 4243 { 4244 unsigned long long n; 4245 int err; 4246 4247 if (cmd_match(buf, "none")) 4248 n = MaxSector; 4249 else { 4250 err = 
kstrtoull(buf, 10, &n); 4251 if (err < 0) 4252 return err; 4253 if (n != (sector_t)n) 4254 return -EINVAL; 4255 } 4256 4257 err = mddev_lock(mddev); 4258 if (err) 4259 return err; 4260 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4261 err = -EBUSY; 4262 4263 if (!err) { 4264 mddev->recovery_cp = n; 4265 if (mddev->pers) 4266 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4267 } 4268 mddev_unlock(mddev); 4269 return err ?: len; 4270 } 4271 static struct md_sysfs_entry md_resync_start = 4272 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 4273 resync_start_show, resync_start_store); 4274 4275 /* 4276 * The array state can be: 4277 * 4278 * clear 4279 * No devices, no size, no level 4280 * Equivalent to STOP_ARRAY ioctl 4281 * inactive 4282 * May have some settings, but array is not active 4283 * all IO results in error 4284 * When written, doesn't tear down array, but just stops it 4285 * suspended (not supported yet) 4286 * All IO requests will block. The array can be reconfigured. 4287 * Writing this, if accepted, will block until array is quiescent 4288 * readonly 4289 * no resync can happen. no superblocks get written. 4290 * write requests fail 4291 * read-auto 4292 * like readonly, but behaves like 'clean' on a write request. 4293 * 4294 * clean - no pending writes, but otherwise active. 4295 * When written to inactive array, starts without resync 4296 * If a write request arrives then 4297 * if metadata is known, mark 'dirty' and switch to 'active'. 4298 * if not known, block and switch to write-pending 4299 * If written to an active array that has pending writes, then fails. 4300 * active 4301 * fully active: IO and resync can be happening. 4302 * When written to inactive array, starts with resync 4303 * 4304 * write-pending 4305 * clean, but writes are blocked waiting for 'active' to be written. 4306 * 4307 * active-idle 4308 * like active, but no writes have been seen for a while (100msec). 4309 * 4310 * broken 4311 * Array is failed. It's useful because mounted-arrays aren't stopped 4312 * when array is failed, so this state will at least alert the user that 4313 * something is wrong. 
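* As a rough usage illustration: reading /sys/block/md0/md/array_state on a healthy running array typically returns "clean" or "active", while writing "readonly" to it requests the transition implemented by md_set_readonly() below.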
4314 */ 4315 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4316 write_pending, active_idle, broken, bad_word}; 4317 static char *array_states[] = { 4318 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4319 "write-pending", "active-idle", "broken", NULL }; 4320 4321 static int match_word(const char *word, char **list) 4322 { 4323 int n; 4324 for (n=0; list[n]; n++) 4325 if (cmd_match(word, list[n])) 4326 break; 4327 return n; 4328 } 4329 4330 static ssize_t 4331 array_state_show(struct mddev *mddev, char *page) 4332 { 4333 enum array_state st = inactive; 4334 4335 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { 4336 switch(mddev->ro) { 4337 case MD_RDONLY: 4338 st = readonly; 4339 break; 4340 case MD_AUTO_READ: 4341 st = read_auto; 4342 break; 4343 case MD_RDWR: 4344 spin_lock(&mddev->lock); 4345 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4346 st = write_pending; 4347 else if (mddev->in_sync) 4348 st = clean; 4349 else if (mddev->safemode) 4350 st = active_idle; 4351 else 4352 st = active; 4353 spin_unlock(&mddev->lock); 4354 } 4355 4356 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) 4357 st = broken; 4358 } else { 4359 if (list_empty(&mddev->disks) && 4360 mddev->raid_disks == 0 && 4361 mddev->dev_sectors == 0) 4362 st = clear; 4363 else 4364 st = inactive; 4365 } 4366 return sprintf(page, "%s\n", array_states[st]); 4367 } 4368 4369 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 4370 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 4371 static int restart_array(struct mddev *mddev); 4372 4373 static ssize_t 4374 array_state_store(struct mddev *mddev, const char *buf, size_t len) 4375 { 4376 int err = 0; 4377 enum array_state st = match_word(buf, array_states); 4378 4379 if (mddev->pers && (st == active || st == clean) && 4380 mddev->ro != MD_RDONLY) { 4381 /* don't take reconfig_mutex when toggling between 4382 * clean and active 4383 */ 4384 spin_lock(&mddev->lock); 4385 if (st == active) { 4386 restart_array(mddev); 4387 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4388 md_wakeup_thread(mddev->thread); 4389 wake_up(&mddev->sb_wait); 4390 } else /* st == clean */ { 4391 restart_array(mddev); 4392 if (!set_in_sync(mddev)) 4393 err = -EBUSY; 4394 } 4395 if (!err) 4396 sysfs_notify_dirent_safe(mddev->sysfs_state); 4397 spin_unlock(&mddev->lock); 4398 return err ?: len; 4399 } 4400 err = mddev_lock(mddev); 4401 if (err) 4402 return err; 4403 err = -EINVAL; 4404 switch(st) { 4405 case bad_word: 4406 break; 4407 case clear: 4408 /* stopping an active array */ 4409 err = do_md_stop(mddev, 0, NULL); 4410 break; 4411 case inactive: 4412 /* stopping an active array */ 4413 if (mddev->pers) 4414 err = do_md_stop(mddev, 2, NULL); 4415 else 4416 err = 0; /* already inactive */ 4417 break; 4418 case suspended: 4419 break; /* not supported yet */ 4420 case readonly: 4421 if (mddev->pers) 4422 err = md_set_readonly(mddev, NULL); 4423 else { 4424 mddev->ro = MD_RDONLY; 4425 set_disk_ro(mddev->gendisk, 1); 4426 err = do_md_run(mddev); 4427 } 4428 break; 4429 case read_auto: 4430 if (mddev->pers) { 4431 if (md_is_rdwr(mddev)) 4432 err = md_set_readonly(mddev, NULL); 4433 else if (mddev->ro == MD_RDONLY) 4434 err = restart_array(mddev); 4435 if (err == 0) { 4436 mddev->ro = MD_AUTO_READ; 4437 set_disk_ro(mddev->gendisk, 0); 4438 } 4439 } else { 4440 mddev->ro = MD_AUTO_READ; 4441 err = do_md_run(mddev); 4442 } 4443 break; 4444 case clean: 4445 if 
(mddev->pers) { 4446 err = restart_array(mddev); 4447 if (err) 4448 break; 4449 spin_lock(&mddev->lock); 4450 if (!set_in_sync(mddev)) 4451 err = -EBUSY; 4452 spin_unlock(&mddev->lock); 4453 } else 4454 err = -EINVAL; 4455 break; 4456 case active: 4457 if (mddev->pers) { 4458 err = restart_array(mddev); 4459 if (err) 4460 break; 4461 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4462 wake_up(&mddev->sb_wait); 4463 err = 0; 4464 } else { 4465 mddev->ro = MD_RDWR; 4466 set_disk_ro(mddev->gendisk, 0); 4467 err = do_md_run(mddev); 4468 } 4469 break; 4470 case write_pending: 4471 case active_idle: 4472 case broken: 4473 /* these cannot be set */ 4474 break; 4475 } 4476 4477 if (!err) { 4478 if (mddev->hold_active == UNTIL_IOCTL) 4479 mddev->hold_active = 0; 4480 sysfs_notify_dirent_safe(mddev->sysfs_state); 4481 } 4482 mddev_unlock(mddev); 4483 return err ?: len; 4484 } 4485 static struct md_sysfs_entry md_array_state = 4486 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 4487 4488 static ssize_t 4489 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 4490 return sprintf(page, "%d\n", 4491 atomic_read(&mddev->max_corr_read_errors)); 4492 } 4493 4494 static ssize_t 4495 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 4496 { 4497 unsigned int n; 4498 int rv; 4499 4500 rv = kstrtouint(buf, 10, &n); 4501 if (rv < 0) 4502 return rv; 4503 atomic_set(&mddev->max_corr_read_errors, n); 4504 return len; 4505 } 4506 4507 static struct md_sysfs_entry max_corr_read_errors = 4508 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 4509 max_corrected_read_errors_store); 4510 4511 static ssize_t 4512 null_show(struct mddev *mddev, char *page) 4513 { 4514 return -EINVAL; 4515 } 4516 4517 /* need to ensure rdev_delayed_delete() has completed */ 4518 static void flush_rdev_wq(struct mddev *mddev) 4519 { 4520 struct md_rdev *rdev; 4521 4522 rcu_read_lock(); 4523 rdev_for_each_rcu(rdev, mddev) 4524 if (work_pending(&rdev->del_work)) { 4525 flush_workqueue(md_rdev_misc_wq); 4526 break; 4527 } 4528 rcu_read_unlock(); 4529 } 4530 4531 static ssize_t 4532 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 4533 { 4534 /* buf must be %d:%d\n? giving major and minor numbers */ 4535 /* The new device is added to the array. 4536 * If the array has a persistent superblock, we read the 4537 * superblock to initialise info and check validity. 4538 * Otherwise, only checking done is that in bind_rdev_to_array, 4539 * which mainly checks size. 
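* For example, writing "8:16" adds the device with major 8 and minor 16 (typically /dev/sdb) to this array.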
4540 */ 4541 char *e; 4542 int major = simple_strtoul(buf, &e, 10); 4543 int minor; 4544 dev_t dev; 4545 struct md_rdev *rdev; 4546 int err; 4547 4548 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 4549 return -EINVAL; 4550 minor = simple_strtoul(e+1, &e, 10); 4551 if (*e && *e != '\n') 4552 return -EINVAL; 4553 dev = MKDEV(major, minor); 4554 if (major != MAJOR(dev) || 4555 minor != MINOR(dev)) 4556 return -EOVERFLOW; 4557 4558 flush_rdev_wq(mddev); 4559 err = mddev_lock(mddev); 4560 if (err) 4561 return err; 4562 if (mddev->persistent) { 4563 rdev = md_import_device(dev, mddev->major_version, 4564 mddev->minor_version); 4565 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 4566 struct md_rdev *rdev0 4567 = list_entry(mddev->disks.next, 4568 struct md_rdev, same_set); 4569 err = super_types[mddev->major_version] 4570 .load_super(rdev, rdev0, mddev->minor_version); 4571 if (err < 0) 4572 goto out; 4573 } 4574 } else if (mddev->external) 4575 rdev = md_import_device(dev, -2, -1); 4576 else 4577 rdev = md_import_device(dev, -1, -1); 4578 4579 if (IS_ERR(rdev)) { 4580 mddev_unlock(mddev); 4581 return PTR_ERR(rdev); 4582 } 4583 err = bind_rdev_to_array(rdev, mddev); 4584 out: 4585 if (err) 4586 export_rdev(rdev); 4587 mddev_unlock(mddev); 4588 if (!err) 4589 md_new_event(); 4590 return err ? err : len; 4591 } 4592 4593 static struct md_sysfs_entry md_new_device = 4594 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4595 4596 static ssize_t 4597 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4598 { 4599 char *end; 4600 unsigned long chunk, end_chunk; 4601 int err; 4602 4603 err = mddev_lock(mddev); 4604 if (err) 4605 return err; 4606 if (!mddev->bitmap) 4607 goto out; 4608 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4609 while (*buf) { 4610 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4611 if (buf == end) break; 4612 if (*end == '-') { /* range */ 4613 buf = end + 1; 4614 end_chunk = simple_strtoul(buf, &end, 0); 4615 if (buf == end) break; 4616 } 4617 if (*end && !isspace(*end)) break; 4618 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4619 buf = skip_spaces(end); 4620 } 4621 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4622 out: 4623 mddev_unlock(mddev); 4624 return len; 4625 } 4626 4627 static struct md_sysfs_entry md_bitmap = 4628 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4629 4630 static ssize_t 4631 size_show(struct mddev *mddev, char *page) 4632 { 4633 return sprintf(page, "%llu\n", 4634 (unsigned long long)mddev->dev_sectors / 2); 4635 } 4636 4637 static int update_size(struct mddev *mddev, sector_t num_sectors); 4638 4639 static ssize_t 4640 size_store(struct mddev *mddev, const char *buf, size_t len) 4641 { 4642 /* If array is inactive, we can reduce the component size, but 4643 * not increase it (except from 0). 4644 * If array is active, we can try an on-line resize 4645 */ 4646 sector_t sectors; 4647 int err = strict_blocks_to_sectors(buf, &sectors); 4648 4649 if (err < 0) 4650 return err; 4651 err = mddev_lock(mddev); 4652 if (err) 4653 return err; 4654 if (mddev->pers) { 4655 err = update_size(mddev, sectors); 4656 if (err == 0) 4657 md_update_sb(mddev, 1); 4658 } else { 4659 if (mddev->dev_sectors == 0 || 4660 mddev->dev_sectors > sectors) 4661 mddev->dev_sectors = sectors; 4662 else 4663 err = -ENOSPC; 4664 } 4665 mddev_unlock(mddev); 4666 return err ?
err : len; 4667 } 4668 4669 static struct md_sysfs_entry md_size = 4670 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 4671 4672 /* Metadata version. 4673 * This is one of 4674 * 'none' for arrays with no metadata (good luck...) 4675 * 'external' for arrays with externally managed metadata, 4676 * or N.M for internally known formats 4677 */ 4678 static ssize_t 4679 metadata_show(struct mddev *mddev, char *page) 4680 { 4681 if (mddev->persistent) 4682 return sprintf(page, "%d.%d\n", 4683 mddev->major_version, mddev->minor_version); 4684 else if (mddev->external) 4685 return sprintf(page, "external:%s\n", mddev->metadata_type); 4686 else 4687 return sprintf(page, "none\n"); 4688 } 4689 4690 static ssize_t 4691 metadata_store(struct mddev *mddev, const char *buf, size_t len) 4692 { 4693 int major, minor; 4694 char *e; 4695 int err; 4696 /* Changing the details of 'external' metadata is 4697 * always permitted. Otherwise there must be 4698 * no devices attached to the array. 4699 */ 4700 4701 err = mddev_lock(mddev); 4702 if (err) 4703 return err; 4704 err = -EBUSY; 4705 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4706 ; 4707 else if (!list_empty(&mddev->disks)) 4708 goto out_unlock; 4709 4710 err = 0; 4711 if (cmd_match(buf, "none")) { 4712 mddev->persistent = 0; 4713 mddev->external = 0; 4714 mddev->major_version = 0; 4715 mddev->minor_version = 90; 4716 goto out_unlock; 4717 } 4718 if (strncmp(buf, "external:", 9) == 0) { 4719 size_t namelen = len-9; 4720 if (namelen >= sizeof(mddev->metadata_type)) 4721 namelen = sizeof(mddev->metadata_type)-1; 4722 strncpy(mddev->metadata_type, buf+9, namelen); 4723 mddev->metadata_type[namelen] = 0; 4724 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4725 mddev->metadata_type[--namelen] = 0; 4726 mddev->persistent = 0; 4727 mddev->external = 1; 4728 mddev->major_version = 0; 4729 mddev->minor_version = 90; 4730 goto out_unlock; 4731 } 4732 major = simple_strtoul(buf, &e, 10); 4733 err = -EINVAL; 4734 if (e==buf || *e != '.') 4735 goto out_unlock; 4736 buf = e+1; 4737 minor = simple_strtoul(buf, &e, 10); 4738 if (e==buf || (*e && *e != '\n') ) 4739 goto out_unlock; 4740 err = -ENOENT; 4741 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4742 goto out_unlock; 4743 mddev->major_version = major; 4744 mddev->minor_version = minor; 4745 mddev->persistent = 1; 4746 mddev->external = 0; 4747 err = 0; 4748 out_unlock: 4749 mddev_unlock(mddev); 4750 return err ?: len; 4751 } 4752 4753 static struct md_sysfs_entry md_metadata = 4754 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4755 4756 static ssize_t 4757 action_show(struct mddev *mddev, char *page) 4758 { 4759 char *type = "idle"; 4760 unsigned long recovery = mddev->recovery; 4761 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 4762 type = "frozen"; 4763 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4764 (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4765 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4766 type = "reshape"; 4767 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4768 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4769 type = "resync"; 4770 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4771 type = "check"; 4772 else 4773 type = "repair"; 4774 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4775 type = "recover"; 4776 else if (mddev->reshape_position != MaxSector) 4777 type = "reshape"; 4778 } 4779 return sprintf(page, "%s\n", type); 4780 } 4781 4782 static ssize_t 
4783 action_store(struct mddev *mddev, const char *page, size_t len) 4784 { 4785 if (!mddev->pers || !mddev->pers->sync_request) 4786 return -EINVAL; 4787 4788 4789 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4790 if (cmd_match(page, "frozen")) 4791 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4792 else 4793 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4794 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4795 mddev_lock(mddev) == 0) { 4796 if (work_pending(&mddev->del_work)) 4797 flush_workqueue(md_misc_wq); 4798 if (mddev->sync_thread) { 4799 sector_t save_rp = mddev->reshape_position; 4800 4801 mddev_unlock(mddev); 4802 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4803 md_unregister_thread(&mddev->sync_thread); 4804 mddev_lock_nointr(mddev); 4805 /* 4806 * set RECOVERY_INTR again and restore reshape 4807 * position in case others changed them after 4808 * got lock, eg, reshape_position_store and 4809 * md_check_recovery. 4810 */ 4811 mddev->reshape_position = save_rp; 4812 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4813 md_reap_sync_thread(mddev); 4814 } 4815 mddev_unlock(mddev); 4816 } 4817 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4818 return -EBUSY; 4819 else if (cmd_match(page, "resync")) 4820 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4821 else if (cmd_match(page, "recover")) { 4822 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4823 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4824 } else if (cmd_match(page, "reshape")) { 4825 int err; 4826 if (mddev->pers->start_reshape == NULL) 4827 return -EINVAL; 4828 err = mddev_lock(mddev); 4829 if (!err) { 4830 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4831 err = -EBUSY; 4832 else { 4833 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4834 err = mddev->pers->start_reshape(mddev); 4835 } 4836 mddev_unlock(mddev); 4837 } 4838 if (err) 4839 return err; 4840 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 4841 } else { 4842 if (cmd_match(page, "check")) 4843 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4844 else if (!cmd_match(page, "repair")) 4845 return -EINVAL; 4846 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4847 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4848 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4849 } 4850 if (mddev->ro == MD_AUTO_READ) { 4851 /* A write to sync_action is enough to justify 4852 * canceling read-auto mode 4853 */ 4854 mddev->ro = MD_RDWR; 4855 md_wakeup_thread(mddev->sync_thread); 4856 } 4857 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4858 md_wakeup_thread(mddev->thread); 4859 sysfs_notify_dirent_safe(mddev->sysfs_action); 4860 return len; 4861 } 4862 4863 static struct md_sysfs_entry md_scan_mode = 4864 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4865 4866 static ssize_t 4867 last_sync_action_show(struct mddev *mddev, char *page) 4868 { 4869 return sprintf(page, "%s\n", mddev->last_sync_action); 4870 } 4871 4872 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4873 4874 static ssize_t 4875 mismatch_cnt_show(struct mddev *mddev, char *page) 4876 { 4877 return sprintf(page, "%llu\n", 4878 (unsigned long long) 4879 atomic64_read(&mddev->resync_mismatches)); 4880 } 4881 4882 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4883 4884 static ssize_t 4885 sync_min_show(struct mddev *mddev, char *page) 4886 { 4887 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4888 mddev->sync_speed_min ? 
"local": "system"); 4889 } 4890 4891 static ssize_t 4892 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4893 { 4894 unsigned int min; 4895 int rv; 4896 4897 if (strncmp(buf, "system", 6)==0) { 4898 min = 0; 4899 } else { 4900 rv = kstrtouint(buf, 10, &min); 4901 if (rv < 0) 4902 return rv; 4903 if (min == 0) 4904 return -EINVAL; 4905 } 4906 mddev->sync_speed_min = min; 4907 return len; 4908 } 4909 4910 static struct md_sysfs_entry md_sync_min = 4911 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4912 4913 static ssize_t 4914 sync_max_show(struct mddev *mddev, char *page) 4915 { 4916 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4917 mddev->sync_speed_max ? "local": "system"); 4918 } 4919 4920 static ssize_t 4921 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4922 { 4923 unsigned int max; 4924 int rv; 4925 4926 if (strncmp(buf, "system", 6)==0) { 4927 max = 0; 4928 } else { 4929 rv = kstrtouint(buf, 10, &max); 4930 if (rv < 0) 4931 return rv; 4932 if (max == 0) 4933 return -EINVAL; 4934 } 4935 mddev->sync_speed_max = max; 4936 return len; 4937 } 4938 4939 static struct md_sysfs_entry md_sync_max = 4940 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4941 4942 static ssize_t 4943 degraded_show(struct mddev *mddev, char *page) 4944 { 4945 return sprintf(page, "%d\n", mddev->degraded); 4946 } 4947 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4948 4949 static ssize_t 4950 sync_force_parallel_show(struct mddev *mddev, char *page) 4951 { 4952 return sprintf(page, "%d\n", mddev->parallel_resync); 4953 } 4954 4955 static ssize_t 4956 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4957 { 4958 long n; 4959 4960 if (kstrtol(buf, 10, &n)) 4961 return -EINVAL; 4962 4963 if (n != 0 && n != 1) 4964 return -EINVAL; 4965 4966 mddev->parallel_resync = n; 4967 4968 if (mddev->sync_thread) 4969 wake_up(&resync_wait); 4970 4971 return len; 4972 } 4973 4974 /* force parallel resync, even with shared block devices */ 4975 static struct md_sysfs_entry md_sync_force_parallel = 4976 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4977 sync_force_parallel_show, sync_force_parallel_store); 4978 4979 static ssize_t 4980 sync_speed_show(struct mddev *mddev, char *page) 4981 { 4982 unsigned long resync, dt, db; 4983 if (mddev->curr_resync == MD_RESYNC_NONE) 4984 return sprintf(page, "none\n"); 4985 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4986 dt = (jiffies - mddev->resync_mark) / HZ; 4987 if (!dt) dt++; 4988 db = resync - mddev->resync_mark_cnt; 4989 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4990 } 4991 4992 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4993 4994 static ssize_t 4995 sync_completed_show(struct mddev *mddev, char *page) 4996 { 4997 unsigned long long max_sectors, resync; 4998 4999 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5000 return sprintf(page, "none\n"); 5001 5002 if (mddev->curr_resync == MD_RESYNC_YIELDED || 5003 mddev->curr_resync == MD_RESYNC_DELAYED) 5004 return sprintf(page, "delayed\n"); 5005 5006 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 5007 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5008 max_sectors = mddev->resync_max_sectors; 5009 else 5010 max_sectors = mddev->dev_sectors; 5011 5012 resync = mddev->curr_resync_completed; 5013 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 5014 } 5015 5016 static struct md_sysfs_entry md_sync_completed = 5017 
__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); 5018 5019 static ssize_t 5020 min_sync_show(struct mddev *mddev, char *page) 5021 { 5022 return sprintf(page, "%llu\n", 5023 (unsigned long long)mddev->resync_min); 5024 } 5025 static ssize_t 5026 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 5027 { 5028 unsigned long long min; 5029 int err; 5030 5031 if (kstrtoull(buf, 10, &min)) 5032 return -EINVAL; 5033 5034 spin_lock(&mddev->lock); 5035 err = -EINVAL; 5036 if (min > mddev->resync_max) 5037 goto out_unlock; 5038 5039 err = -EBUSY; 5040 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5041 goto out_unlock; 5042 5043 /* Round down to multiple of 4K for safety */ 5044 mddev->resync_min = round_down(min, 8); 5045 err = 0; 5046 5047 out_unlock: 5048 spin_unlock(&mddev->lock); 5049 return err ?: len; 5050 } 5051 5052 static struct md_sysfs_entry md_min_sync = 5053 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 5054 5055 static ssize_t 5056 max_sync_show(struct mddev *mddev, char *page) 5057 { 5058 if (mddev->resync_max == MaxSector) 5059 return sprintf(page, "max\n"); 5060 else 5061 return sprintf(page, "%llu\n", 5062 (unsigned long long)mddev->resync_max); 5063 } 5064 static ssize_t 5065 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 5066 { 5067 int err; 5068 spin_lock(&mddev->lock); 5069 if (strncmp(buf, "max", 3) == 0) 5070 mddev->resync_max = MaxSector; 5071 else { 5072 unsigned long long max; 5073 int chunk; 5074 5075 err = -EINVAL; 5076 if (kstrtoull(buf, 10, &max)) 5077 goto out_unlock; 5078 if (max < mddev->resync_min) 5079 goto out_unlock; 5080 5081 err = -EBUSY; 5082 if (max < mddev->resync_max && md_is_rdwr(mddev) && 5083 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5084 goto out_unlock; 5085 5086 /* Must be a multiple of chunk_size */ 5087 chunk = mddev->chunk_sectors; 5088 if (chunk) { 5089 sector_t temp = max; 5090 5091 err = -EINVAL; 5092 if (sector_div(temp, chunk)) 5093 goto out_unlock; 5094 } 5095 mddev->resync_max = max; 5096 } 5097 wake_up(&mddev->recovery_wait); 5098 err = 0; 5099 out_unlock: 5100 spin_unlock(&mddev->lock); 5101 return err ?: len; 5102 } 5103 5104 static struct md_sysfs_entry md_max_sync = 5105 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 5106 5107 static ssize_t 5108 suspend_lo_show(struct mddev *mddev, char *page) 5109 { 5110 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 5111 } 5112 5113 static ssize_t 5114 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 5115 { 5116 unsigned long long new; 5117 int err; 5118 5119 err = kstrtoull(buf, 10, &new); 5120 if (err < 0) 5121 return err; 5122 if (new != (sector_t)new) 5123 return -EINVAL; 5124 5125 err = mddev_lock(mddev); 5126 if (err) 5127 return err; 5128 err = -EINVAL; 5129 if (mddev->pers == NULL || 5130 mddev->pers->quiesce == NULL) 5131 goto unlock; 5132 mddev_suspend(mddev); 5133 mddev->suspend_lo = new; 5134 mddev_resume(mddev); 5135 5136 err = 0; 5137 unlock: 5138 mddev_unlock(mddev); 5139 return err ?: len; 5140 } 5141 static struct md_sysfs_entry md_suspend_lo = 5142 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 5143 5144 static ssize_t 5145 suspend_hi_show(struct mddev *mddev, char *page) 5146 { 5147 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 5148 } 5149 5150 static ssize_t 5151 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 5152 { 5153 unsigned long long new; 5154 int err; 5155 
5156 err = kstrtoull(buf, 10, &new); 5157 if (err < 0) 5158 return err; 5159 if (new != (sector_t)new) 5160 return -EINVAL; 5161 5162 err = mddev_lock(mddev); 5163 if (err) 5164 return err; 5165 err = -EINVAL; 5166 if (mddev->pers == NULL) 5167 goto unlock; 5168 5169 mddev_suspend(mddev); 5170 mddev->suspend_hi = new; 5171 mddev_resume(mddev); 5172 5173 err = 0; 5174 unlock: 5175 mddev_unlock(mddev); 5176 return err ?: len; 5177 } 5178 static struct md_sysfs_entry md_suspend_hi = 5179 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 5180 5181 static ssize_t 5182 reshape_position_show(struct mddev *mddev, char *page) 5183 { 5184 if (mddev->reshape_position != MaxSector) 5185 return sprintf(page, "%llu\n", 5186 (unsigned long long)mddev->reshape_position); 5187 strcpy(page, "none\n"); 5188 return 5; 5189 } 5190 5191 static ssize_t 5192 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 5193 { 5194 struct md_rdev *rdev; 5195 unsigned long long new; 5196 int err; 5197 5198 err = kstrtoull(buf, 10, &new); 5199 if (err < 0) 5200 return err; 5201 if (new != (sector_t)new) 5202 return -EINVAL; 5203 err = mddev_lock(mddev); 5204 if (err) 5205 return err; 5206 err = -EBUSY; 5207 if (mddev->pers) 5208 goto unlock; 5209 mddev->reshape_position = new; 5210 mddev->delta_disks = 0; 5211 mddev->reshape_backwards = 0; 5212 mddev->new_level = mddev->level; 5213 mddev->new_layout = mddev->layout; 5214 mddev->new_chunk_sectors = mddev->chunk_sectors; 5215 rdev_for_each(rdev, mddev) 5216 rdev->new_data_offset = rdev->data_offset; 5217 err = 0; 5218 unlock: 5219 mddev_unlock(mddev); 5220 return err ?: len; 5221 } 5222 5223 static struct md_sysfs_entry md_reshape_position = 5224 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 5225 reshape_position_store); 5226 5227 static ssize_t 5228 reshape_direction_show(struct mddev *mddev, char *page) 5229 { 5230 return sprintf(page, "%s\n", 5231 mddev->reshape_backwards ?
"backwards" : "forwards"); 5232 } 5233 5234 static ssize_t 5235 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 5236 { 5237 int backwards = 0; 5238 int err; 5239 5240 if (cmd_match(buf, "forwards")) 5241 backwards = 0; 5242 else if (cmd_match(buf, "backwards")) 5243 backwards = 1; 5244 else 5245 return -EINVAL; 5246 if (mddev->reshape_backwards == backwards) 5247 return len; 5248 5249 err = mddev_lock(mddev); 5250 if (err) 5251 return err; 5252 /* check if we are allowed to change */ 5253 if (mddev->delta_disks) 5254 err = -EBUSY; 5255 else if (mddev->persistent && 5256 mddev->major_version == 0) 5257 err = -EINVAL; 5258 else 5259 mddev->reshape_backwards = backwards; 5260 mddev_unlock(mddev); 5261 return err ?: len; 5262 } 5263 5264 static struct md_sysfs_entry md_reshape_direction = 5265 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 5266 reshape_direction_store); 5267 5268 static ssize_t 5269 array_size_show(struct mddev *mddev, char *page) 5270 { 5271 if (mddev->external_size) 5272 return sprintf(page, "%llu\n", 5273 (unsigned long long)mddev->array_sectors/2); 5274 else 5275 return sprintf(page, "default\n"); 5276 } 5277 5278 static ssize_t 5279 array_size_store(struct mddev *mddev, const char *buf, size_t len) 5280 { 5281 sector_t sectors; 5282 int err; 5283 5284 err = mddev_lock(mddev); 5285 if (err) 5286 return err; 5287 5288 /* cluster raid doesn't support change array_sectors */ 5289 if (mddev_is_clustered(mddev)) { 5290 mddev_unlock(mddev); 5291 return -EINVAL; 5292 } 5293 5294 if (strncmp(buf, "default", 7) == 0) { 5295 if (mddev->pers) 5296 sectors = mddev->pers->size(mddev, 0, 0); 5297 else 5298 sectors = mddev->array_sectors; 5299 5300 mddev->external_size = 0; 5301 } else { 5302 if (strict_blocks_to_sectors(buf, &sectors) < 0) 5303 err = -EINVAL; 5304 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 5305 err = -E2BIG; 5306 else 5307 mddev->external_size = 1; 5308 } 5309 5310 if (!err) { 5311 mddev->array_sectors = sectors; 5312 if (mddev->pers) 5313 set_capacity_and_notify(mddev->gendisk, 5314 mddev->array_sectors); 5315 } 5316 mddev_unlock(mddev); 5317 return err ?: len; 5318 } 5319 5320 static struct md_sysfs_entry md_array_size = 5321 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 5322 array_size_store); 5323 5324 static ssize_t 5325 consistency_policy_show(struct mddev *mddev, char *page) 5326 { 5327 int ret; 5328 5329 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 5330 ret = sprintf(page, "journal\n"); 5331 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { 5332 ret = sprintf(page, "ppl\n"); 5333 } else if (mddev->bitmap) { 5334 ret = sprintf(page, "bitmap\n"); 5335 } else if (mddev->pers) { 5336 if (mddev->pers->sync_request) 5337 ret = sprintf(page, "resync\n"); 5338 else 5339 ret = sprintf(page, "none\n"); 5340 } else { 5341 ret = sprintf(page, "unknown\n"); 5342 } 5343 5344 return ret; 5345 } 5346 5347 static ssize_t 5348 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) 5349 { 5350 int err = 0; 5351 5352 if (mddev->pers) { 5353 if (mddev->pers->change_consistency_policy) 5354 err = mddev->pers->change_consistency_policy(mddev, buf); 5355 else 5356 err = -EBUSY; 5357 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { 5358 set_bit(MD_HAS_PPL, &mddev->flags); 5359 } else { 5360 err = -EINVAL; 5361 } 5362 5363 return err ?
err : len; 5364 } 5365 5366 static struct md_sysfs_entry md_consistency_policy = 5367 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show, 5368 consistency_policy_store); 5369 5370 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) 5371 { 5372 return sprintf(page, "%d\n", mddev->fail_last_dev); 5373 } 5374 5375 /* 5376 * Setting fail_last_dev to true to allow last device to be forcibly removed 5377 * from RAID1/RAID10. 5378 */ 5379 static ssize_t 5380 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) 5381 { 5382 int ret; 5383 bool value; 5384 5385 ret = kstrtobool(buf, &value); 5386 if (ret) 5387 return ret; 5388 5389 if (value != mddev->fail_last_dev) 5390 mddev->fail_last_dev = value; 5391 5392 return len; 5393 } 5394 static struct md_sysfs_entry md_fail_last_dev = 5395 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, 5396 fail_last_dev_store); 5397 5398 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) 5399 { 5400 if (mddev->pers == NULL || (mddev->pers->level != 1)) 5401 return sprintf(page, "n/a\n"); 5402 else 5403 return sprintf(page, "%d\n", mddev->serialize_policy); 5404 } 5405 5406 /* 5407 * Setting serialize_policy to true to enforce write IO is not reordered 5408 * for raid1. 5409 */ 5410 static ssize_t 5411 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) 5412 { 5413 int err; 5414 bool value; 5415 5416 err = kstrtobool(buf, &value); 5417 if (err) 5418 return err; 5419 5420 if (value == mddev->serialize_policy) 5421 return len; 5422 5423 err = mddev_lock(mddev); 5424 if (err) 5425 return err; 5426 if (mddev->pers == NULL || (mddev->pers->level != 1)) { 5427 pr_err("md: serialize_policy is only effective for raid1\n"); 5428 err = -EINVAL; 5429 goto unlock; 5430 } 5431 5432 mddev_suspend(mddev); 5433 if (value) 5434 mddev_create_serial_pool(mddev, NULL, true); 5435 else 5436 mddev_destroy_serial_pool(mddev, NULL, true); 5437 mddev->serialize_policy = value; 5438 mddev_resume(mddev); 5439 unlock: 5440 mddev_unlock(mddev); 5441 return err ?: len; 5442 } 5443 5444 static struct md_sysfs_entry md_serialize_policy = 5445 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, 5446 serialize_policy_store); 5447 5448 5449 static struct attribute *md_default_attrs[] = { 5450 &md_level.attr, 5451 &md_layout.attr, 5452 &md_raid_disks.attr, 5453 &md_uuid.attr, 5454 &md_chunk_size.attr, 5455 &md_size.attr, 5456 &md_resync_start.attr, 5457 &md_metadata.attr, 5458 &md_new_device.attr, 5459 &md_safe_delay.attr, 5460 &md_array_state.attr, 5461 &md_reshape_position.attr, 5462 &md_reshape_direction.attr, 5463 &md_array_size.attr, 5464 &max_corr_read_errors.attr, 5465 &md_consistency_policy.attr, 5466 &md_fail_last_dev.attr, 5467 &md_serialize_policy.attr, 5468 NULL, 5469 }; 5470 5471 static const struct attribute_group md_default_group = { 5472 .attrs = md_default_attrs, 5473 }; 5474 5475 static struct attribute *md_redundancy_attrs[] = { 5476 &md_scan_mode.attr, 5477 &md_last_scan_mode.attr, 5478 &md_mismatches.attr, 5479 &md_sync_min.attr, 5480 &md_sync_max.attr, 5481 &md_sync_speed.attr, 5482 &md_sync_force_parallel.attr, 5483 &md_sync_completed.attr, 5484 &md_min_sync.attr, 5485 &md_max_sync.attr, 5486 &md_suspend_lo.attr, 5487 &md_suspend_hi.attr, 5488 &md_bitmap.attr, 5489 &md_degraded.attr, 5490 NULL, 5491 }; 5492 static const struct attribute_group md_redundancy_group = { 5493 .name = NULL, 5494 .attrs = md_redundancy_attrs, 5495 }; 5496 5497 static const struct 
attribute_group *md_attr_groups[] = { 5498 &md_default_group, 5499 &md_bitmap_group, 5500 NULL, 5501 }; 5502 5503 static ssize_t 5504 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 5505 { 5506 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5507 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5508 ssize_t rv; 5509 5510 if (!entry->show) 5511 return -EIO; 5512 spin_lock(&all_mddevs_lock); 5513 if (!mddev_get(mddev)) { 5514 spin_unlock(&all_mddevs_lock); 5515 return -EBUSY; 5516 } 5517 spin_unlock(&all_mddevs_lock); 5518 5519 rv = entry->show(mddev, page); 5520 mddev_put(mddev); 5521 return rv; 5522 } 5523 5524 static ssize_t 5525 md_attr_store(struct kobject *kobj, struct attribute *attr, 5526 const char *page, size_t length) 5527 { 5528 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5529 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5530 ssize_t rv; 5531 5532 if (!entry->store) 5533 return -EIO; 5534 if (!capable(CAP_SYS_ADMIN)) 5535 return -EACCES; 5536 spin_lock(&all_mddevs_lock); 5537 if (!mddev_get(mddev)) { 5538 spin_unlock(&all_mddevs_lock); 5539 return -EBUSY; 5540 } 5541 spin_unlock(&all_mddevs_lock); 5542 rv = entry->store(mddev, page, length); 5543 mddev_put(mddev); 5544 return rv; 5545 } 5546 5547 static void md_kobj_release(struct kobject *ko) 5548 { 5549 struct mddev *mddev = container_of(ko, struct mddev, kobj); 5550 5551 if (mddev->sysfs_state) 5552 sysfs_put(mddev->sysfs_state); 5553 if (mddev->sysfs_level) 5554 sysfs_put(mddev->sysfs_level); 5555 5556 del_gendisk(mddev->gendisk); 5557 put_disk(mddev->gendisk); 5558 } 5559 5560 static const struct sysfs_ops md_sysfs_ops = { 5561 .show = md_attr_show, 5562 .store = md_attr_store, 5563 }; 5564 static struct kobj_type md_ktype = { 5565 .release = md_kobj_release, 5566 .sysfs_ops = &md_sysfs_ops, 5567 .default_groups = md_attr_groups, 5568 }; 5569 5570 int mdp_major = 0; 5571 5572 static void mddev_delayed_delete(struct work_struct *ws) 5573 { 5574 struct mddev *mddev = container_of(ws, struct mddev, del_work); 5575 5576 kobject_put(&mddev->kobj); 5577 } 5578 5579 static void no_op(struct percpu_ref *r) {} 5580 5581 int mddev_init_writes_pending(struct mddev *mddev) 5582 { 5583 if (mddev->writes_pending.percpu_count_ptr) 5584 return 0; 5585 if (percpu_ref_init(&mddev->writes_pending, no_op, 5586 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0) 5587 return -ENOMEM; 5588 /* We want to start with the refcount at zero */ 5589 percpu_ref_put(&mddev->writes_pending); 5590 return 0; 5591 } 5592 EXPORT_SYMBOL_GPL(mddev_init_writes_pending); 5593 5594 struct mddev *md_alloc(dev_t dev, char *name) 5595 { 5596 /* 5597 * If dev is zero, name is the name of a device to allocate with 5598 * an arbitrary minor number. It will be "md_???" 5599 * If dev is non-zero it must be a device number with a MAJOR of 5600 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then 5601 * the device is being created by opening a node in /dev. 5602 * If "name" is not NULL, the device is being created by 5603 * writing to /sys/module/md_mod/parameters/new_array. 5604 */ 5605 static DEFINE_MUTEX(disks_mutex); 5606 struct mddev *mddev; 5607 struct gendisk *disk; 5608 int partitioned; 5609 int shift; 5610 int unit; 5611 int error ; 5612 5613 /* 5614 * Wait for any previous instance of this device to be completely 5615 * removed (mddev_delayed_delete). 
5616 */ 5617 flush_workqueue(md_misc_wq); 5618 flush_workqueue(md_rdev_misc_wq); 5619 5620 mutex_lock(&disks_mutex); 5621 mddev = mddev_alloc(dev); 5622 if (IS_ERR(mddev)) { 5623 error = PTR_ERR(mddev); 5624 goto out_unlock; 5625 } 5626 5627 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 5628 shift = partitioned ? MdpMinorShift : 0; 5629 unit = MINOR(mddev->unit) >> shift; 5630 5631 if (name && !dev) { 5632 /* Need to ensure that 'name' is not a duplicate. 5633 */ 5634 struct mddev *mddev2; 5635 spin_lock(&all_mddevs_lock); 5636 5637 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 5638 if (mddev2->gendisk && 5639 strcmp(mddev2->gendisk->disk_name, name) == 0) { 5640 spin_unlock(&all_mddevs_lock); 5641 error = -EEXIST; 5642 goto out_free_mddev; 5643 } 5644 spin_unlock(&all_mddevs_lock); 5645 } 5646 if (name && dev) 5647 /* 5648 * Creating /dev/mdNNN via "newarray", so adjust hold_active. 5649 */ 5650 mddev->hold_active = UNTIL_STOP; 5651 5652 error = -ENOMEM; 5653 disk = blk_alloc_disk(NUMA_NO_NODE); 5654 if (!disk) 5655 goto out_free_mddev; 5656 5657 disk->major = MAJOR(mddev->unit); 5658 disk->first_minor = unit << shift; 5659 disk->minors = 1 << shift; 5660 if (name) 5661 strcpy(disk->disk_name, name); 5662 else if (partitioned) 5663 sprintf(disk->disk_name, "md_d%d", unit); 5664 else 5665 sprintf(disk->disk_name, "md%d", unit); 5666 disk->fops = &md_fops; 5667 disk->private_data = mddev; 5668 5669 mddev->queue = disk->queue; 5670 blk_set_stacking_limits(&mddev->queue->limits); 5671 blk_queue_write_cache(mddev->queue, true, true); 5672 disk->events |= DISK_EVENT_MEDIA_CHANGE; 5673 mddev->gendisk = disk; 5674 error = add_disk(disk); 5675 if (error) 5676 goto out_put_disk; 5677 5678 kobject_init(&mddev->kobj, &md_ktype); 5679 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); 5680 if (error) { 5681 /* 5682 * The disk is already live at this point. Clear the hold flag 5683 * and let mddev_put take care of the deletion, as it isn't any 5684 * different from a normal close on last release now. 5685 */ 5686 mddev->hold_active = 0; 5687 mutex_unlock(&disks_mutex); 5688 mddev_put(mddev); 5689 return ERR_PTR(error); 5690 } 5691 5692 kobject_uevent(&mddev->kobj, KOBJ_ADD); 5693 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 5694 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); 5695 mutex_unlock(&disks_mutex); 5696 return mddev; 5697 5698 out_put_disk: 5699 put_disk(disk); 5700 out_free_mddev: 5701 mddev_free(mddev); 5702 out_unlock: 5703 mutex_unlock(&disks_mutex); 5704 return ERR_PTR(error); 5705 } 5706 5707 static int md_alloc_and_put(dev_t dev, char *name) 5708 { 5709 struct mddev *mddev = md_alloc(dev, name); 5710 5711 if (IS_ERR(mddev)) 5712 return PTR_ERR(mddev); 5713 mddev_put(mddev); 5714 return 0; 5715 } 5716 5717 static void md_probe(dev_t dev) 5718 { 5719 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512) 5720 return; 5721 if (create_on_open) 5722 md_alloc_and_put(dev, NULL); 5723 } 5724 5725 static int add_named_array(const char *val, const struct kernel_param *kp) 5726 { 5727 /* 5728 * val must be "md_*" or "mdNNN". 5729 * For "md_*" we allocate an array with a large free minor number, and 5730 * set the name to val. val must not already be an active name. 5731 * For "mdNNN" we allocate an array with the minor number NNN 5732 * which must not already be in use. 
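* For example, writing "md_test" to /sys/module/md_mod/parameters/new_array creates an array with an arbitrary free minor whose gendisk is named "md_test", while writing "md42" asks for minor 42 on MD_MAJOR ("md_test" and "md42" are purely illustrative names).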
5733 */ 5734 int len = strlen(val); 5735 char buf[DISK_NAME_LEN]; 5736 unsigned long devnum; 5737 5738 while (len && val[len-1] == '\n') 5739 len--; 5740 if (len >= DISK_NAME_LEN) 5741 return -E2BIG; 5742 strscpy(buf, val, len+1); 5743 if (strncmp(buf, "md_", 3) == 0) 5744 return md_alloc_and_put(0, buf); 5745 if (strncmp(buf, "md", 2) == 0 && 5746 isdigit(buf[2]) && 5747 kstrtoul(buf+2, 10, &devnum) == 0 && 5748 devnum <= MINORMASK) 5749 return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL); 5750 5751 return -EINVAL; 5752 } 5753 5754 static void md_safemode_timeout(struct timer_list *t) 5755 { 5756 struct mddev *mddev = from_timer(mddev, t, safemode_timer); 5757 5758 mddev->safemode = 1; 5759 if (mddev->external) 5760 sysfs_notify_dirent_safe(mddev->sysfs_state); 5761 5762 md_wakeup_thread(mddev->thread); 5763 } 5764 5765 static int start_dirty_degraded; 5766 5767 int md_run(struct mddev *mddev) 5768 { 5769 int err; 5770 struct md_rdev *rdev; 5771 struct md_personality *pers; 5772 bool nowait = true; 5773 5774 if (list_empty(&mddev->disks)) 5775 /* cannot run an array with no devices.. */ 5776 return -EINVAL; 5777 5778 if (mddev->pers) 5779 return -EBUSY; 5780 /* Cannot run until previous stop completes properly */ 5781 if (mddev->sysfs_active) 5782 return -EBUSY; 5783 5784 /* 5785 * Analyze all RAID superblock(s) 5786 */ 5787 if (!mddev->raid_disks) { 5788 if (!mddev->persistent) 5789 return -EINVAL; 5790 err = analyze_sbs(mddev); 5791 if (err) 5792 return -EINVAL; 5793 } 5794 5795 if (mddev->level != LEVEL_NONE) 5796 request_module("md-level-%d", mddev->level); 5797 else if (mddev->clevel[0]) 5798 request_module("md-%s", mddev->clevel); 5799 5800 /* 5801 * Drop all container device buffers, from now on 5802 * the only valid external interface is through the md 5803 * device. 5804 */ 5805 mddev->has_superblocks = false; 5806 rdev_for_each(rdev, mddev) { 5807 if (test_bit(Faulty, &rdev->flags)) 5808 continue; 5809 sync_blockdev(rdev->bdev); 5810 invalidate_bdev(rdev->bdev); 5811 if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) { 5812 mddev->ro = MD_RDONLY; 5813 if (mddev->gendisk) 5814 set_disk_ro(mddev->gendisk, 1); 5815 } 5816 5817 if (rdev->sb_page) 5818 mddev->has_superblocks = true; 5819 5820 /* perform some consistency tests on the device. 5821 * We don't want the data to overlap the metadata, 5822 * Internal Bitmap issues have been handled elsewhere. 
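* Two layouts are checked below: data placed before the superblock (data_offset < sb_start, as with 0.90 and 1.0 metadata stored at the end of the device) and the superblock placed before the data (as with 1.1 and 1.2 metadata at the start).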
5823 */ 5824 if (rdev->meta_bdev) { 5825 /* Nothing to check */; 5826 } else if (rdev->data_offset < rdev->sb_start) { 5827 if (mddev->dev_sectors && 5828 rdev->data_offset + mddev->dev_sectors 5829 > rdev->sb_start) { 5830 pr_warn("md: %s: data overlaps metadata\n", 5831 mdname(mddev)); 5832 return -EINVAL; 5833 } 5834 } else { 5835 if (rdev->sb_start + rdev->sb_size/512 5836 > rdev->data_offset) { 5837 pr_warn("md: %s: metadata overlaps data\n", 5838 mdname(mddev)); 5839 return -EINVAL; 5840 } 5841 } 5842 sysfs_notify_dirent_safe(rdev->sysfs_state); 5843 nowait = nowait && bdev_nowait(rdev->bdev); 5844 } 5845 5846 if (!bioset_initialized(&mddev->bio_set)) { 5847 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5848 if (err) 5849 return err; 5850 } 5851 if (!bioset_initialized(&mddev->sync_set)) { 5852 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5853 if (err) 5854 goto exit_bio_set; 5855 } 5856 5857 spin_lock(&pers_lock); 5858 pers = find_pers(mddev->level, mddev->clevel); 5859 if (!pers || !try_module_get(pers->owner)) { 5860 spin_unlock(&pers_lock); 5861 if (mddev->level != LEVEL_NONE) 5862 pr_warn("md: personality for level %d is not loaded!\n", 5863 mddev->level); 5864 else 5865 pr_warn("md: personality for level %s is not loaded!\n", 5866 mddev->clevel); 5867 err = -EINVAL; 5868 goto abort; 5869 } 5870 spin_unlock(&pers_lock); 5871 if (mddev->level != pers->level) { 5872 mddev->level = pers->level; 5873 mddev->new_level = pers->level; 5874 } 5875 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5876 5877 if (mddev->reshape_position != MaxSector && 5878 pers->start_reshape == NULL) { 5879 /* This personality cannot handle reshaping... */ 5880 module_put(pers->owner); 5881 err = -EINVAL; 5882 goto abort; 5883 } 5884 5885 if (pers->sync_request) { 5886 /* Warn if this is a potentially silly 5887 * configuration. 
5888 */ 5889 struct md_rdev *rdev2; 5890 int warned = 0; 5891 5892 rdev_for_each(rdev, mddev) 5893 rdev_for_each(rdev2, mddev) { 5894 if (rdev < rdev2 && 5895 rdev->bdev->bd_disk == 5896 rdev2->bdev->bd_disk) { 5897 pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n", 5898 mdname(mddev), 5899 rdev->bdev, 5900 rdev2->bdev); 5901 warned = 1; 5902 } 5903 } 5904 5905 if (warned) 5906 pr_warn("True protection against single-disk failure might be compromised.\n"); 5907 } 5908 5909 mddev->recovery = 0; 5910 /* may be over-ridden by personality */ 5911 mddev->resync_max_sectors = mddev->dev_sectors; 5912 5913 mddev->ok_start_degraded = start_dirty_degraded; 5914 5915 if (start_readonly && md_is_rdwr(mddev)) 5916 mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */ 5917 5918 err = pers->run(mddev); 5919 if (err) 5920 pr_warn("md: pers->run() failed ...\n"); 5921 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5922 WARN_ONCE(!mddev->external_size, 5923 "%s: default size too small, but 'external_size' not in effect?\n", 5924 __func__); 5925 pr_warn("md: invalid array_size %llu > default size %llu\n", 5926 (unsigned long long)mddev->array_sectors / 2, 5927 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5928 err = -EINVAL; 5929 } 5930 if (err == 0 && pers->sync_request && 5931 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5932 struct bitmap *bitmap; 5933 5934 bitmap = md_bitmap_create(mddev, -1); 5935 if (IS_ERR(bitmap)) { 5936 err = PTR_ERR(bitmap); 5937 pr_warn("%s: failed to create bitmap (%d)\n", 5938 mdname(mddev), err); 5939 } else 5940 mddev->bitmap = bitmap; 5941 5942 } 5943 if (err) 5944 goto bitmap_abort; 5945 5946 if (mddev->bitmap_info.max_write_behind > 0) { 5947 bool create_pool = false; 5948 5949 rdev_for_each(rdev, mddev) { 5950 if (test_bit(WriteMostly, &rdev->flags) && 5951 rdev_init_serial(rdev)) 5952 create_pool = true; 5953 } 5954 if (create_pool && mddev->serial_info_pool == NULL) { 5955 mddev->serial_info_pool = 5956 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 5957 sizeof(struct serial_info)); 5958 if (!mddev->serial_info_pool) { 5959 err = -ENOMEM; 5960 goto bitmap_abort; 5961 } 5962 } 5963 } 5964 5965 if (mddev->queue) { 5966 bool nonrot = true; 5967 5968 rdev_for_each(rdev, mddev) { 5969 if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) { 5970 nonrot = false; 5971 break; 5972 } 5973 } 5974 if (mddev->degraded) 5975 nonrot = false; 5976 if (nonrot) 5977 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); 5978 else 5979 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); 5980 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); 5981 5982 /* Set the NOWAIT flags if all underlying devices support it */ 5983 if (nowait) 5984 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); 5985 } 5986 if (pers->sync_request) { 5987 if (mddev->kobj.sd && 5988 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 5989 pr_warn("md: cannot register extra attributes for %s\n", 5990 mdname(mddev)); 5991 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 5992 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 5993 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 5994 } else if (mddev->ro == MD_AUTO_READ) 5995 mddev->ro = MD_RDWR; 5996 5997 atomic_set(&mddev->max_corr_read_errors, 5998 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 5999 mddev->safemode = 0; 6000 if (mddev_is_clustered(mddev)) 6001 mddev->safemode_delay = 0; 6002 else 6003 
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 6004 mddev->in_sync = 1; 6005 smp_wmb(); 6006 spin_lock(&mddev->lock); 6007 mddev->pers = pers; 6008 spin_unlock(&mddev->lock); 6009 rdev_for_each(rdev, mddev) 6010 if (rdev->raid_disk >= 0) 6011 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ 6012 6013 if (mddev->degraded && md_is_rdwr(mddev)) 6014 /* This ensures that recovering status is reported immediately 6015 * via sysfs - until a lack of spares is confirmed. 6016 */ 6017 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6018 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6019 6020 if (mddev->sb_flags) 6021 md_update_sb(mddev, 0); 6022 6023 md_new_event(); 6024 return 0; 6025 6026 bitmap_abort: 6027 mddev_detach(mddev); 6028 if (mddev->private) 6029 pers->free(mddev, mddev->private); 6030 mddev->private = NULL; 6031 module_put(pers->owner); 6032 md_bitmap_destroy(mddev); 6033 abort: 6034 bioset_exit(&mddev->sync_set); 6035 exit_bio_set: 6036 bioset_exit(&mddev->bio_set); 6037 return err; 6038 } 6039 EXPORT_SYMBOL_GPL(md_run); 6040 6041 int do_md_run(struct mddev *mddev) 6042 { 6043 int err; 6044 6045 set_bit(MD_NOT_READY, &mddev->flags); 6046 err = md_run(mddev); 6047 if (err) 6048 goto out; 6049 err = md_bitmap_load(mddev); 6050 if (err) { 6051 md_bitmap_destroy(mddev); 6052 goto out; 6053 } 6054 6055 if (mddev_is_clustered(mddev)) 6056 md_allow_write(mddev); 6057 6058 /* run start up tasks that require md_thread */ 6059 md_start(mddev); 6060 6061 md_wakeup_thread(mddev->thread); 6062 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 6063 6064 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); 6065 clear_bit(MD_NOT_READY, &mddev->flags); 6066 mddev->changed = 1; 6067 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 6068 sysfs_notify_dirent_safe(mddev->sysfs_state); 6069 sysfs_notify_dirent_safe(mddev->sysfs_action); 6070 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 6071 out: 6072 clear_bit(MD_NOT_READY, &mddev->flags); 6073 return err; 6074 } 6075 6076 int md_start(struct mddev *mddev) 6077 { 6078 int ret = 0; 6079 6080 if (mddev->pers->start) { 6081 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6082 md_wakeup_thread(mddev->thread); 6083 ret = mddev->pers->start(mddev); 6084 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6085 md_wakeup_thread(mddev->sync_thread); 6086 } 6087 return ret; 6088 } 6089 EXPORT_SYMBOL_GPL(md_start); 6090 6091 static int restart_array(struct mddev *mddev) 6092 { 6093 struct gendisk *disk = mddev->gendisk; 6094 struct md_rdev *rdev; 6095 bool has_journal = false; 6096 bool has_readonly = false; 6097 6098 /* Complain if it has no devices */ 6099 if (list_empty(&mddev->disks)) 6100 return -ENXIO; 6101 if (!mddev->pers) 6102 return -EINVAL; 6103 if (md_is_rdwr(mddev)) 6104 return -EBUSY; 6105 6106 rcu_read_lock(); 6107 rdev_for_each_rcu(rdev, mddev) { 6108 if (test_bit(Journal, &rdev->flags) && 6109 !test_bit(Faulty, &rdev->flags)) 6110 has_journal = true; 6111 if (rdev_read_only(rdev)) 6112 has_readonly = true; 6113 } 6114 rcu_read_unlock(); 6115 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) 6116 /* Don't restart rw with journal missing/faulty */ 6117 return -EINVAL; 6118 if (has_readonly) 6119 return -EROFS; 6120 6121 mddev->safemode = 0; 6122 mddev->ro = MD_RDWR; 6123 set_disk_ro(disk, 0); 6124 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 6125 /* Kick recovery or resync if necessary */ 6126 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6127 
md_wakeup_thread(mddev->thread); 6128 md_wakeup_thread(mddev->sync_thread); 6129 sysfs_notify_dirent_safe(mddev->sysfs_state); 6130 return 0; 6131 } 6132 6133 static void md_clean(struct mddev *mddev) 6134 { 6135 mddev->array_sectors = 0; 6136 mddev->external_size = 0; 6137 mddev->dev_sectors = 0; 6138 mddev->raid_disks = 0; 6139 mddev->recovery_cp = 0; 6140 mddev->resync_min = 0; 6141 mddev->resync_max = MaxSector; 6142 mddev->reshape_position = MaxSector; 6143 mddev->external = 0; 6144 mddev->persistent = 0; 6145 mddev->level = LEVEL_NONE; 6146 mddev->clevel[0] = 0; 6147 mddev->flags = 0; 6148 mddev->sb_flags = 0; 6149 mddev->ro = MD_RDWR; 6150 mddev->metadata_type[0] = 0; 6151 mddev->chunk_sectors = 0; 6152 mddev->ctime = mddev->utime = 0; 6153 mddev->layout = 0; 6154 mddev->max_disks = 0; 6155 mddev->events = 0; 6156 mddev->can_decrease_events = 0; 6157 mddev->delta_disks = 0; 6158 mddev->reshape_backwards = 0; 6159 mddev->new_level = LEVEL_NONE; 6160 mddev->new_layout = 0; 6161 mddev->new_chunk_sectors = 0; 6162 mddev->curr_resync = 0; 6163 atomic64_set(&mddev->resync_mismatches, 0); 6164 mddev->suspend_lo = mddev->suspend_hi = 0; 6165 mddev->sync_speed_min = mddev->sync_speed_max = 0; 6166 mddev->recovery = 0; 6167 mddev->in_sync = 0; 6168 mddev->changed = 0; 6169 mddev->degraded = 0; 6170 mddev->safemode = 0; 6171 mddev->private = NULL; 6172 mddev->cluster_info = NULL; 6173 mddev->bitmap_info.offset = 0; 6174 mddev->bitmap_info.default_offset = 0; 6175 mddev->bitmap_info.default_space = 0; 6176 mddev->bitmap_info.chunksize = 0; 6177 mddev->bitmap_info.daemon_sleep = 0; 6178 mddev->bitmap_info.max_write_behind = 0; 6179 mddev->bitmap_info.nodes = 0; 6180 } 6181 6182 static void __md_stop_writes(struct mddev *mddev) 6183 { 6184 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6185 if (work_pending(&mddev->del_work)) 6186 flush_workqueue(md_misc_wq); 6187 if (mddev->sync_thread) { 6188 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6189 md_unregister_thread(&mddev->sync_thread); 6190 md_reap_sync_thread(mddev); 6191 } 6192 6193 del_timer_sync(&mddev->safemode_timer); 6194 6195 if (mddev->pers && mddev->pers->quiesce) { 6196 mddev->pers->quiesce(mddev, 1); 6197 mddev->pers->quiesce(mddev, 0); 6198 } 6199 md_bitmap_flush(mddev); 6200 6201 if (md_is_rdwr(mddev) && 6202 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 6203 mddev->sb_flags)) { 6204 /* mark array as shutdown cleanly */ 6205 if (!mddev_is_clustered(mddev)) 6206 mddev->in_sync = 1; 6207 md_update_sb(mddev, 1); 6208 } 6209 /* disable policy to guarantee rdevs free resources for serialization */ 6210 mddev->serialize_policy = 0; 6211 mddev_destroy_serial_pool(mddev, NULL, true); 6212 } 6213 6214 void md_stop_writes(struct mddev *mddev) 6215 { 6216 mddev_lock_nointr(mddev); 6217 __md_stop_writes(mddev); 6218 mddev_unlock(mddev); 6219 } 6220 EXPORT_SYMBOL_GPL(md_stop_writes); 6221 6222 static void mddev_detach(struct mddev *mddev) 6223 { 6224 md_bitmap_wait_behind_writes(mddev); 6225 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { 6226 mddev->pers->quiesce(mddev, 1); 6227 mddev->pers->quiesce(mddev, 0); 6228 } 6229 md_unregister_thread(&mddev->thread); 6230 if (mddev->queue) 6231 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 6232 } 6233 6234 static void __md_stop(struct mddev *mddev) 6235 { 6236 struct md_personality *pers = mddev->pers; 6237 md_bitmap_destroy(mddev); 6238 mddev_detach(mddev); 6239 /* Ensure ->event_work is done */ 6240 if (mddev->event_work.func) 6241 
flush_workqueue(md_misc_wq); 6242 spin_lock(&mddev->lock); 6243 mddev->pers = NULL; 6244 spin_unlock(&mddev->lock); 6245 if (mddev->private) 6246 pers->free(mddev, mddev->private); 6247 mddev->private = NULL; 6248 if (pers->sync_request && mddev->to_remove == NULL) 6249 mddev->to_remove = &md_redundancy_group; 6250 module_put(pers->owner); 6251 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6252 } 6253 6254 void md_stop(struct mddev *mddev) 6255 { 6256 /* stop the array and free any attached data structures. 6257 * This is called from dm-raid 6258 */ 6259 __md_stop_writes(mddev); 6260 __md_stop(mddev); 6261 bioset_exit(&mddev->bio_set); 6262 bioset_exit(&mddev->sync_set); 6263 } 6264 6265 EXPORT_SYMBOL_GPL(md_stop); 6266 6267 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 6268 { 6269 int err = 0; 6270 int did_freeze = 0; 6271 6272 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6273 did_freeze = 1; 6274 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6275 md_wakeup_thread(mddev->thread); 6276 } 6277 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6278 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6279 if (mddev->sync_thread) 6280 /* Thread might be blocked waiting for metadata update 6281 * which will now never happen */ 6282 wake_up_process(mddev->sync_thread->tsk); 6283 6284 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 6285 return -EBUSY; 6286 mddev_unlock(mddev); 6287 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 6288 &mddev->recovery)); 6289 wait_event(mddev->sb_wait, 6290 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 6291 mddev_lock_nointr(mddev); 6292 6293 mutex_lock(&mddev->open_mutex); 6294 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6295 mddev->sync_thread || 6296 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6297 pr_warn("md: %s still in use.\n", mdname(mddev)); 6298 if (did_freeze) { 6299 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6300 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6301 md_wakeup_thread(mddev->thread); 6302 } 6303 err = -EBUSY; 6304 goto out; 6305 } 6306 if (mddev->pers) { 6307 __md_stop_writes(mddev); 6308 6309 err = -ENXIO; 6310 if (mddev->ro == MD_RDONLY) 6311 goto out; 6312 mddev->ro = MD_RDONLY; 6313 set_disk_ro(mddev->gendisk, 1); 6314 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6315 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6316 md_wakeup_thread(mddev->thread); 6317 sysfs_notify_dirent_safe(mddev->sysfs_state); 6318 err = 0; 6319 } 6320 out: 6321 mutex_unlock(&mddev->open_mutex); 6322 return err; 6323 } 6324 6325 /* mode: 6326 * 0 - completely stop and disassemble array 6327 * 2 - stop but do not disassemble array 6328 */ 6329 static int do_md_stop(struct mddev *mddev, int mode, 6330 struct block_device *bdev) 6331 { 6332 struct gendisk *disk = mddev->gendisk; 6333 struct md_rdev *rdev; 6334 int did_freeze = 0; 6335 6336 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6337 did_freeze = 1; 6338 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6339 md_wakeup_thread(mddev->thread); 6340 } 6341 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6342 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6343 if (mddev->sync_thread) 6344 /* Thread might be blocked waiting for metadata update 6345 * which will now never happen */ 6346 wake_up_process(mddev->sync_thread->tsk); 6347 6348 mddev_unlock(mddev); 6349 wait_event(resync_wait, (mddev->sync_thread == NULL && 6350 !test_bit(MD_RECOVERY_RUNNING, 6351 &mddev->recovery))); 6352
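/* (Added note: the reconfig mutex was dropped above so that a running resync/recovery can be reaped and MD_RECOVERY_RUNNING can clear, since reaping it needs that mutex; it is re-taken here before the array is torn down under open_mutex.) */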
mddev_lock_nointr(mddev); 6353 6354 mutex_lock(&mddev->open_mutex); 6355 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6356 mddev->sysfs_active || 6357 mddev->sync_thread || 6358 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6359 pr_warn("md: %s still in use.\n",mdname(mddev)); 6360 mutex_unlock(&mddev->open_mutex); 6361 if (did_freeze) { 6362 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6363 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6364 md_wakeup_thread(mddev->thread); 6365 } 6366 return -EBUSY; 6367 } 6368 if (mddev->pers) { 6369 if (!md_is_rdwr(mddev)) 6370 set_disk_ro(disk, 0); 6371 6372 __md_stop_writes(mddev); 6373 __md_stop(mddev); 6374 6375 /* tell userspace to handle 'inactive' */ 6376 sysfs_notify_dirent_safe(mddev->sysfs_state); 6377 6378 rdev_for_each(rdev, mddev) 6379 if (rdev->raid_disk >= 0) 6380 sysfs_unlink_rdev(mddev, rdev); 6381 6382 set_capacity_and_notify(disk, 0); 6383 mutex_unlock(&mddev->open_mutex); 6384 mddev->changed = 1; 6385 6386 if (!md_is_rdwr(mddev)) 6387 mddev->ro = MD_RDWR; 6388 } else 6389 mutex_unlock(&mddev->open_mutex); 6390 /* 6391 * Free resources if final stop 6392 */ 6393 if (mode == 0) { 6394 pr_info("md: %s stopped.\n", mdname(mddev)); 6395 6396 if (mddev->bitmap_info.file) { 6397 struct file *f = mddev->bitmap_info.file; 6398 spin_lock(&mddev->lock); 6399 mddev->bitmap_info.file = NULL; 6400 spin_unlock(&mddev->lock); 6401 fput(f); 6402 } 6403 mddev->bitmap_info.offset = 0; 6404 6405 export_array(mddev); 6406 6407 md_clean(mddev); 6408 if (mddev->hold_active == UNTIL_STOP) 6409 mddev->hold_active = 0; 6410 } 6411 md_new_event(); 6412 sysfs_notify_dirent_safe(mddev->sysfs_state); 6413 return 0; 6414 } 6415 6416 #ifndef MODULE 6417 static void autorun_array(struct mddev *mddev) 6418 { 6419 struct md_rdev *rdev; 6420 int err; 6421 6422 if (list_empty(&mddev->disks)) 6423 return; 6424 6425 pr_info("md: running: "); 6426 6427 rdev_for_each(rdev, mddev) { 6428 pr_cont("<%pg>", rdev->bdev); 6429 } 6430 pr_cont("\n"); 6431 6432 err = do_md_run(mddev); 6433 if (err) { 6434 pr_warn("md: do_md_run() returned %d\n", err); 6435 do_md_stop(mddev, 0, NULL); 6436 } 6437 } 6438 6439 /* 6440 * lets try to run arrays based on all disks that have arrived 6441 * until now. (those are in pending_raid_disks) 6442 * 6443 * the method: pick the first pending disk, collect all disks with 6444 * the same UUID, remove all from the pending list and put them into 6445 * the 'same_array' list. Then order this list based on superblock 6446 * update time (freshest comes first), kick out 'old' disks and 6447 * compare superblocks. If everything's fine then run it. 6448 * 6449 * If "unit" is allocated, then bump its reference count 6450 */ 6451 static void autorun_devices(int part) 6452 { 6453 struct md_rdev *rdev0, *rdev, *tmp; 6454 struct mddev *mddev; 6455 6456 pr_info("md: autorun ...\n"); 6457 while (!list_empty(&pending_raid_disks)) { 6458 int unit; 6459 dev_t dev; 6460 LIST_HEAD(candidates); 6461 rdev0 = list_entry(pending_raid_disks.next, 6462 struct md_rdev, same_set); 6463 6464 pr_debug("md: considering %pg ...\n", rdev0->bdev); 6465 INIT_LIST_HEAD(&candidates); 6466 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 6467 if (super_90_load(rdev, rdev0, 0) >= 0) { 6468 pr_debug("md: adding %pg ...\n", 6469 rdev->bdev); 6470 list_move(&rdev->same_set, &candidates); 6471 } 6472 /* 6473 * now we have a set of devices, with all of them having 6474 * mostly sane superblocks. It's time to allocate the 6475 * mddev. 
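 * (Illustration, not from the original comment: for a partitionable "mdp" array the array index lives in the high bits of the minor, so preferred_minor 2 with an MdpMinorShift of 6 maps to minor 2 << 6 = 128; for a plain mdN array the minor is simply preferred_minor.)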
6476 */ 6477 if (part) { 6478 dev = MKDEV(mdp_major, 6479 rdev0->preferred_minor << MdpMinorShift); 6480 unit = MINOR(dev) >> MdpMinorShift; 6481 } else { 6482 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 6483 unit = MINOR(dev); 6484 } 6485 if (rdev0->preferred_minor != unit) { 6486 pr_warn("md: unit number in %pg is bad: %d\n", 6487 rdev0->bdev, rdev0->preferred_minor); 6488 break; 6489 } 6490 6491 mddev = md_alloc(dev, NULL); 6492 if (IS_ERR(mddev)) 6493 break; 6494 6495 if (mddev_lock(mddev)) 6496 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 6497 else if (mddev->raid_disks || mddev->major_version 6498 || !list_empty(&mddev->disks)) { 6499 pr_warn("md: %s already running, cannot run %pg\n", 6500 mdname(mddev), rdev0->bdev); 6501 mddev_unlock(mddev); 6502 } else { 6503 pr_debug("md: created %s\n", mdname(mddev)); 6504 mddev->persistent = 1; 6505 rdev_for_each_list(rdev, tmp, &candidates) { 6506 list_del_init(&rdev->same_set); 6507 if (bind_rdev_to_array(rdev, mddev)) 6508 export_rdev(rdev); 6509 } 6510 autorun_array(mddev); 6511 mddev_unlock(mddev); 6512 } 6513 /* on success, candidates will be empty, on error 6514 * it won't... 6515 */ 6516 rdev_for_each_list(rdev, tmp, &candidates) { 6517 list_del_init(&rdev->same_set); 6518 export_rdev(rdev); 6519 } 6520 mddev_put(mddev); 6521 } 6522 pr_info("md: ... autorun DONE.\n"); 6523 } 6524 #endif /* !MODULE */ 6525 6526 static int get_version(void __user *arg) 6527 { 6528 mdu_version_t ver; 6529 6530 ver.major = MD_MAJOR_VERSION; 6531 ver.minor = MD_MINOR_VERSION; 6532 ver.patchlevel = MD_PATCHLEVEL_VERSION; 6533 6534 if (copy_to_user(arg, &ver, sizeof(ver))) 6535 return -EFAULT; 6536 6537 return 0; 6538 } 6539 6540 static int get_array_info(struct mddev *mddev, void __user *arg) 6541 { 6542 mdu_array_info_t info; 6543 int nr,working,insync,failed,spare; 6544 struct md_rdev *rdev; 6545 6546 nr = working = insync = failed = spare = 0; 6547 rcu_read_lock(); 6548 rdev_for_each_rcu(rdev, mddev) { 6549 nr++; 6550 if (test_bit(Faulty, &rdev->flags)) 6551 failed++; 6552 else { 6553 working++; 6554 if (test_bit(In_sync, &rdev->flags)) 6555 insync++; 6556 else if (test_bit(Journal, &rdev->flags)) 6557 /* TODO: add journal count to md_u.h */ 6558 ; 6559 else 6560 spare++; 6561 } 6562 } 6563 rcu_read_unlock(); 6564 6565 info.major_version = mddev->major_version; 6566 info.minor_version = mddev->minor_version; 6567 info.patch_version = MD_PATCHLEVEL_VERSION; 6568 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 6569 info.level = mddev->level; 6570 info.size = mddev->dev_sectors / 2; 6571 if (info.size != mddev->dev_sectors / 2) /* overflow */ 6572 info.size = -1; 6573 info.nr_disks = nr; 6574 info.raid_disks = mddev->raid_disks; 6575 info.md_minor = mddev->md_minor; 6576 info.not_persistent= !mddev->persistent; 6577 6578 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 6579 info.state = 0; 6580 if (mddev->in_sync) 6581 info.state = (1<<MD_SB_CLEAN); 6582 if (mddev->bitmap && mddev->bitmap_info.offset) 6583 info.state |= (1<<MD_SB_BITMAP_PRESENT); 6584 if (mddev_is_clustered(mddev)) 6585 info.state |= (1<<MD_SB_CLUSTERED); 6586 info.active_disks = insync; 6587 info.working_disks = working; 6588 info.failed_disks = failed; 6589 info.spare_disks = spare; 6590 6591 info.layout = mddev->layout; 6592 info.chunk_size = mddev->chunk_sectors << 9; 6593 6594 if (copy_to_user(arg, &info, sizeof(info))) 6595 return -EFAULT; 6596 6597 return 0; 6598 } 6599 6600 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 6601 { 6602 
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 6603 char *ptr; 6604 int err; 6605 6606 file = kzalloc(sizeof(*file), GFP_NOIO); 6607 if (!file) 6608 return -ENOMEM; 6609 6610 err = 0; 6611 spin_lock(&mddev->lock); 6612 /* bitmap enabled */ 6613 if (mddev->bitmap_info.file) { 6614 ptr = file_path(mddev->bitmap_info.file, file->pathname, 6615 sizeof(file->pathname)); 6616 if (IS_ERR(ptr)) 6617 err = PTR_ERR(ptr); 6618 else 6619 memmove(file->pathname, ptr, 6620 sizeof(file->pathname)-(ptr-file->pathname)); 6621 } 6622 spin_unlock(&mddev->lock); 6623 6624 if (err == 0 && 6625 copy_to_user(arg, file, sizeof(*file))) 6626 err = -EFAULT; 6627 6628 kfree(file); 6629 return err; 6630 } 6631 6632 static int get_disk_info(struct mddev *mddev, void __user * arg) 6633 { 6634 mdu_disk_info_t info; 6635 struct md_rdev *rdev; 6636 6637 if (copy_from_user(&info, arg, sizeof(info))) 6638 return -EFAULT; 6639 6640 rcu_read_lock(); 6641 rdev = md_find_rdev_nr_rcu(mddev, info.number); 6642 if (rdev) { 6643 info.major = MAJOR(rdev->bdev->bd_dev); 6644 info.minor = MINOR(rdev->bdev->bd_dev); 6645 info.raid_disk = rdev->raid_disk; 6646 info.state = 0; 6647 if (test_bit(Faulty, &rdev->flags)) 6648 info.state |= (1<<MD_DISK_FAULTY); 6649 else if (test_bit(In_sync, &rdev->flags)) { 6650 info.state |= (1<<MD_DISK_ACTIVE); 6651 info.state |= (1<<MD_DISK_SYNC); 6652 } 6653 if (test_bit(Journal, &rdev->flags)) 6654 info.state |= (1<<MD_DISK_JOURNAL); 6655 if (test_bit(WriteMostly, &rdev->flags)) 6656 info.state |= (1<<MD_DISK_WRITEMOSTLY); 6657 if (test_bit(FailFast, &rdev->flags)) 6658 info.state |= (1<<MD_DISK_FAILFAST); 6659 } else { 6660 info.major = info.minor = 0; 6661 info.raid_disk = -1; 6662 info.state = (1<<MD_DISK_REMOVED); 6663 } 6664 rcu_read_unlock(); 6665 6666 if (copy_to_user(arg, &info, sizeof(info))) 6667 return -EFAULT; 6668 6669 return 0; 6670 } 6671 6672 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) 6673 { 6674 struct md_rdev *rdev; 6675 dev_t dev = MKDEV(info->major,info->minor); 6676 6677 if (mddev_is_clustered(mddev) && 6678 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 6679 pr_warn("%s: Cannot add to clustered mddev.\n", 6680 mdname(mddev)); 6681 return -EINVAL; 6682 } 6683 6684 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 6685 return -EOVERFLOW; 6686 6687 if (!mddev->raid_disks) { 6688 int err; 6689 /* expecting a device which has a superblock */ 6690 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 6691 if (IS_ERR(rdev)) { 6692 pr_warn("md: md_import_device returned %ld\n", 6693 PTR_ERR(rdev)); 6694 return PTR_ERR(rdev); 6695 } 6696 if (!list_empty(&mddev->disks)) { 6697 struct md_rdev *rdev0 6698 = list_entry(mddev->disks.next, 6699 struct md_rdev, same_set); 6700 err = super_types[mddev->major_version] 6701 .load_super(rdev, rdev0, mddev->minor_version); 6702 if (err < 0) { 6703 pr_warn("md: %pg has different UUID to %pg\n", 6704 rdev->bdev, 6705 rdev0->bdev); 6706 export_rdev(rdev); 6707 return -EINVAL; 6708 } 6709 } 6710 err = bind_rdev_to_array(rdev, mddev); 6711 if (err) 6712 export_rdev(rdev); 6713 return err; 6714 } 6715 6716 /* 6717 * md_add_new_disk can be used once the array is assembled 6718 * to add "hot spares". 
They must already have a superblock 6719 * written 6720 */ 6721 if (mddev->pers) { 6722 int err; 6723 if (!mddev->pers->hot_add_disk) { 6724 pr_warn("%s: personality does not support diskops!\n", 6725 mdname(mddev)); 6726 return -EINVAL; 6727 } 6728 if (mddev->persistent) 6729 rdev = md_import_device(dev, mddev->major_version, 6730 mddev->minor_version); 6731 else 6732 rdev = md_import_device(dev, -1, -1); 6733 if (IS_ERR(rdev)) { 6734 pr_warn("md: md_import_device returned %ld\n", 6735 PTR_ERR(rdev)); 6736 return PTR_ERR(rdev); 6737 } 6738 /* set saved_raid_disk if appropriate */ 6739 if (!mddev->persistent) { 6740 if (info->state & (1<<MD_DISK_SYNC) && 6741 info->raid_disk < mddev->raid_disks) { 6742 rdev->raid_disk = info->raid_disk; 6743 set_bit(In_sync, &rdev->flags); 6744 clear_bit(Bitmap_sync, &rdev->flags); 6745 } else 6746 rdev->raid_disk = -1; 6747 rdev->saved_raid_disk = rdev->raid_disk; 6748 } else 6749 super_types[mddev->major_version]. 6750 validate_super(mddev, rdev); 6751 if ((info->state & (1<<MD_DISK_SYNC)) && 6752 rdev->raid_disk != info->raid_disk) { 6753 /* This was a hot-add request, but events doesn't 6754 * match, so reject it. 6755 */ 6756 export_rdev(rdev); 6757 return -EINVAL; 6758 } 6759 6760 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 6761 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6762 set_bit(WriteMostly, &rdev->flags); 6763 else 6764 clear_bit(WriteMostly, &rdev->flags); 6765 if (info->state & (1<<MD_DISK_FAILFAST)) 6766 set_bit(FailFast, &rdev->flags); 6767 else 6768 clear_bit(FailFast, &rdev->flags); 6769 6770 if (info->state & (1<<MD_DISK_JOURNAL)) { 6771 struct md_rdev *rdev2; 6772 bool has_journal = false; 6773 6774 /* make sure no existing journal disk */ 6775 rdev_for_each(rdev2, mddev) { 6776 if (test_bit(Journal, &rdev2->flags)) { 6777 has_journal = true; 6778 break; 6779 } 6780 } 6781 if (has_journal || mddev->bitmap) { 6782 export_rdev(rdev); 6783 return -EBUSY; 6784 } 6785 set_bit(Journal, &rdev->flags); 6786 } 6787 /* 6788 * check whether the device shows up in other nodes 6789 */ 6790 if (mddev_is_clustered(mddev)) { 6791 if (info->state & (1 << MD_DISK_CANDIDATE)) 6792 set_bit(Candidate, &rdev->flags); 6793 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 6794 /* --add initiated by this node */ 6795 err = md_cluster_ops->add_new_disk(mddev, rdev); 6796 if (err) { 6797 export_rdev(rdev); 6798 return err; 6799 } 6800 } 6801 } 6802 6803 rdev->raid_disk = -1; 6804 err = bind_rdev_to_array(rdev, mddev); 6805 6806 if (err) 6807 export_rdev(rdev); 6808 6809 if (mddev_is_clustered(mddev)) { 6810 if (info->state & (1 << MD_DISK_CANDIDATE)) { 6811 if (!err) { 6812 err = md_cluster_ops->new_disk_ack(mddev, 6813 err == 0); 6814 if (err) 6815 md_kick_rdev_from_array(rdev); 6816 } 6817 } else { 6818 if (err) 6819 md_cluster_ops->add_new_disk_cancel(mddev); 6820 else 6821 err = add_bound_rdev(rdev); 6822 } 6823 6824 } else if (!err) 6825 err = add_bound_rdev(rdev); 6826 6827 return err; 6828 } 6829 6830 /* otherwise, md_add_new_disk is only allowed 6831 * for major_version==0 superblocks 6832 */ 6833 if (mddev->major_version != 0) { 6834 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 6835 return -EINVAL; 6836 } 6837 6838 if (!(info->state & (1<<MD_DISK_FAULTY))) { 6839 int err; 6840 rdev = md_import_device(dev, -1, 0); 6841 if (IS_ERR(rdev)) { 6842 pr_warn("md: error, md_import_device() returned %ld\n", 6843 PTR_ERR(rdev)); 6844 return PTR_ERR(rdev); 6845 } 6846 rdev->desc_nr = info->number; 6847 if (info->raid_disk < 
mddev->raid_disks) 6848 rdev->raid_disk = info->raid_disk; 6849 else 6850 rdev->raid_disk = -1; 6851 6852 if (rdev->raid_disk < mddev->raid_disks) 6853 if (info->state & (1<<MD_DISK_SYNC)) 6854 set_bit(In_sync, &rdev->flags); 6855 6856 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6857 set_bit(WriteMostly, &rdev->flags); 6858 if (info->state & (1<<MD_DISK_FAILFAST)) 6859 set_bit(FailFast, &rdev->flags); 6860 6861 if (!mddev->persistent) { 6862 pr_debug("md: nonpersistent superblock ...\n"); 6863 rdev->sb_start = bdev_nr_sectors(rdev->bdev); 6864 } else 6865 rdev->sb_start = calc_dev_sboffset(rdev); 6866 rdev->sectors = rdev->sb_start; 6867 6868 err = bind_rdev_to_array(rdev, mddev); 6869 if (err) { 6870 export_rdev(rdev); 6871 return err; 6872 } 6873 } 6874 6875 return 0; 6876 } 6877 6878 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6879 { 6880 struct md_rdev *rdev; 6881 6882 if (!mddev->pers) 6883 return -ENODEV; 6884 6885 rdev = find_rdev(mddev, dev); 6886 if (!rdev) 6887 return -ENXIO; 6888 6889 if (rdev->raid_disk < 0) 6890 goto kick_rdev; 6891 6892 clear_bit(Blocked, &rdev->flags); 6893 remove_and_add_spares(mddev, rdev); 6894 6895 if (rdev->raid_disk >= 0) 6896 goto busy; 6897 6898 kick_rdev: 6899 if (mddev_is_clustered(mddev)) { 6900 if (md_cluster_ops->remove_disk(mddev, rdev)) 6901 goto busy; 6902 } 6903 6904 md_kick_rdev_from_array(rdev); 6905 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6906 if (mddev->thread) 6907 md_wakeup_thread(mddev->thread); 6908 else 6909 md_update_sb(mddev, 1); 6910 md_new_event(); 6911 6912 return 0; 6913 busy: 6914 pr_debug("md: cannot remove active disk %pg from %s ...\n", 6915 rdev->bdev, mdname(mddev)); 6916 return -EBUSY; 6917 } 6918 6919 static int hot_add_disk(struct mddev *mddev, dev_t dev) 6920 { 6921 int err; 6922 struct md_rdev *rdev; 6923 6924 if (!mddev->pers) 6925 return -ENODEV; 6926 6927 if (mddev->major_version != 0) { 6928 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 6929 mdname(mddev)); 6930 return -EINVAL; 6931 } 6932 if (!mddev->pers->hot_add_disk) { 6933 pr_warn("%s: personality does not support diskops!\n", 6934 mdname(mddev)); 6935 return -EINVAL; 6936 } 6937 6938 rdev = md_import_device(dev, -1, 0); 6939 if (IS_ERR(rdev)) { 6940 pr_warn("md: error, md_import_device() returned %ld\n", 6941 PTR_ERR(rdev)); 6942 return -EINVAL; 6943 } 6944 6945 if (mddev->persistent) 6946 rdev->sb_start = calc_dev_sboffset(rdev); 6947 else 6948 rdev->sb_start = bdev_nr_sectors(rdev->bdev); 6949 6950 rdev->sectors = rdev->sb_start; 6951 6952 if (test_bit(Faulty, &rdev->flags)) { 6953 pr_warn("md: can not hot-add faulty %pg disk to %s!\n", 6954 rdev->bdev, mdname(mddev)); 6955 err = -EINVAL; 6956 goto abort_export; 6957 } 6958 6959 clear_bit(In_sync, &rdev->flags); 6960 rdev->desc_nr = -1; 6961 rdev->saved_raid_disk = -1; 6962 err = bind_rdev_to_array(rdev, mddev); 6963 if (err) 6964 goto abort_export; 6965 6966 /* 6967 * The rest should better be atomic, we can have disk failures 6968 * noticed in interrupt contexts ... 6969 */ 6970 6971 rdev->raid_disk = -1; 6972 6973 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6974 if (!mddev->thread) 6975 md_update_sb(mddev, 1); 6976 /* 6977 * If the new disk does not support REQ_NOWAIT, 6978 * disable on the whole MD. 
6979 */ 6980 if (!bdev_nowait(rdev->bdev)) { 6981 pr_info("%s: Disabling nowait because %pg does not support nowait\n", 6982 mdname(mddev), rdev->bdev); 6983 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue); 6984 } 6985 /* 6986 * Kick recovery, maybe this spare has to be added to the 6987 * array immediately. 6988 */ 6989 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6990 md_wakeup_thread(mddev->thread); 6991 md_new_event(); 6992 return 0; 6993 6994 abort_export: 6995 export_rdev(rdev); 6996 return err; 6997 } 6998 6999 static int set_bitmap_file(struct mddev *mddev, int fd) 7000 { 7001 int err = 0; 7002 7003 if (mddev->pers) { 7004 if (!mddev->pers->quiesce || !mddev->thread) 7005 return -EBUSY; 7006 if (mddev->recovery || mddev->sync_thread) 7007 return -EBUSY; 7008 /* we should be able to change the bitmap.. */ 7009 } 7010 7011 if (fd >= 0) { 7012 struct inode *inode; 7013 struct file *f; 7014 7015 if (mddev->bitmap || mddev->bitmap_info.file) 7016 return -EEXIST; /* cannot add when bitmap is present */ 7017 f = fget(fd); 7018 7019 if (f == NULL) { 7020 pr_warn("%s: error: failed to get bitmap file\n", 7021 mdname(mddev)); 7022 return -EBADF; 7023 } 7024 7025 inode = f->f_mapping->host; 7026 if (!S_ISREG(inode->i_mode)) { 7027 pr_warn("%s: error: bitmap file must be a regular file\n", 7028 mdname(mddev)); 7029 err = -EBADF; 7030 } else if (!(f->f_mode & FMODE_WRITE)) { 7031 pr_warn("%s: error: bitmap file must open for write\n", 7032 mdname(mddev)); 7033 err = -EBADF; 7034 } else if (atomic_read(&inode->i_writecount) != 1) { 7035 pr_warn("%s: error: bitmap file is already in use\n", 7036 mdname(mddev)); 7037 err = -EBUSY; 7038 } 7039 if (err) { 7040 fput(f); 7041 return err; 7042 } 7043 mddev->bitmap_info.file = f; 7044 mddev->bitmap_info.offset = 0; /* file overrides offset */ 7045 } else if (mddev->bitmap == NULL) 7046 return -ENOENT; /* cannot remove what isn't there */ 7047 err = 0; 7048 if (mddev->pers) { 7049 if (fd >= 0) { 7050 struct bitmap *bitmap; 7051 7052 bitmap = md_bitmap_create(mddev, -1); 7053 mddev_suspend(mddev); 7054 if (!IS_ERR(bitmap)) { 7055 mddev->bitmap = bitmap; 7056 err = md_bitmap_load(mddev); 7057 } else 7058 err = PTR_ERR(bitmap); 7059 if (err) { 7060 md_bitmap_destroy(mddev); 7061 fd = -1; 7062 } 7063 mddev_resume(mddev); 7064 } else if (fd < 0) { 7065 mddev_suspend(mddev); 7066 md_bitmap_destroy(mddev); 7067 mddev_resume(mddev); 7068 } 7069 } 7070 if (fd < 0) { 7071 struct file *f = mddev->bitmap_info.file; 7072 if (f) { 7073 spin_lock(&mddev->lock); 7074 mddev->bitmap_info.file = NULL; 7075 spin_unlock(&mddev->lock); 7076 fput(f); 7077 } 7078 } 7079 7080 return err; 7081 } 7082 7083 /* 7084 * md_set_array_info is used two different ways 7085 * The original usage is when creating a new array. 7086 * In this usage, raid_disks is > 0 and it together with 7087 * level, size, not_persistent,layout,chunksize determine the 7088 * shape of the array. 7089 * This will always create an array with a type-0.90.0 superblock. 7090 * The newer usage is when assembling an array. 7091 * In this case raid_disks will be 0, and the major_version field is 7092 * use to determine which style super-blocks are to be found on the devices. 7093 * The minor and patch _version numbers are also kept incase the 7094 * super_block handler wishes to interpret them. 
7095 */ 7096 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) 7097 { 7098 if (info->raid_disks == 0) { 7099 /* just setting version number for superblock loading */ 7100 if (info->major_version < 0 || 7101 info->major_version >= ARRAY_SIZE(super_types) || 7102 super_types[info->major_version].name == NULL) { 7103 /* maybe try to auto-load a module? */ 7104 pr_warn("md: superblock version %d not known\n", 7105 info->major_version); 7106 return -EINVAL; 7107 } 7108 mddev->major_version = info->major_version; 7109 mddev->minor_version = info->minor_version; 7110 mddev->patch_version = info->patch_version; 7111 mddev->persistent = !info->not_persistent; 7112 /* ensure mddev_put doesn't delete this now that there 7113 * is some minimal configuration. 7114 */ 7115 mddev->ctime = ktime_get_real_seconds(); 7116 return 0; 7117 } 7118 mddev->major_version = MD_MAJOR_VERSION; 7119 mddev->minor_version = MD_MINOR_VERSION; 7120 mddev->patch_version = MD_PATCHLEVEL_VERSION; 7121 mddev->ctime = ktime_get_real_seconds(); 7122 7123 mddev->level = info->level; 7124 mddev->clevel[0] = 0; 7125 mddev->dev_sectors = 2 * (sector_t)info->size; 7126 mddev->raid_disks = info->raid_disks; 7127 /* don't set md_minor, it is determined by which /dev/md* was 7128 * openned 7129 */ 7130 if (info->state & (1<<MD_SB_CLEAN)) 7131 mddev->recovery_cp = MaxSector; 7132 else 7133 mddev->recovery_cp = 0; 7134 mddev->persistent = ! info->not_persistent; 7135 mddev->external = 0; 7136 7137 mddev->layout = info->layout; 7138 if (mddev->level == 0) 7139 /* Cannot trust RAID0 layout info here */ 7140 mddev->layout = -1; 7141 mddev->chunk_sectors = info->chunk_size >> 9; 7142 7143 if (mddev->persistent) { 7144 mddev->max_disks = MD_SB_DISKS; 7145 mddev->flags = 0; 7146 mddev->sb_flags = 0; 7147 } 7148 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7149 7150 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 7151 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 7152 mddev->bitmap_info.offset = 0; 7153 7154 mddev->reshape_position = MaxSector; 7155 7156 /* 7157 * Generate a 128 bit UUID 7158 */ 7159 get_random_bytes(mddev->uuid, 16); 7160 7161 mddev->new_level = mddev->level; 7162 mddev->new_chunk_sectors = mddev->chunk_sectors; 7163 mddev->new_layout = mddev->layout; 7164 mddev->delta_disks = 0; 7165 mddev->reshape_backwards = 0; 7166 7167 return 0; 7168 } 7169 7170 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 7171 { 7172 lockdep_assert_held(&mddev->reconfig_mutex); 7173 7174 if (mddev->external_size) 7175 return; 7176 7177 mddev->array_sectors = array_sectors; 7178 } 7179 EXPORT_SYMBOL(md_set_array_sectors); 7180 7181 static int update_size(struct mddev *mddev, sector_t num_sectors) 7182 { 7183 struct md_rdev *rdev; 7184 int rv; 7185 int fit = (num_sectors == 0); 7186 sector_t old_dev_sectors = mddev->dev_sectors; 7187 7188 if (mddev->pers->resize == NULL) 7189 return -EINVAL; 7190 /* The "num_sectors" is the number of sectors of each device that 7191 * is used. This can only make sense for arrays with redundancy. 7192 * linear and raid0 always use whatever space is available. We can only 7193 * consider changing this number if no resync or reconstruction is 7194 * happening, and if the new size is acceptable. It must fit before the 7195 * sb_start or, if that is <data_offset, it must fit before the size 7196 * of each device. If num_sectors is zero, we find the largest size 7197 * that fits. 
7198 */ 7199 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7200 mddev->sync_thread) 7201 return -EBUSY; 7202 if (!md_is_rdwr(mddev)) 7203 return -EROFS; 7204 7205 rdev_for_each(rdev, mddev) { 7206 sector_t avail = rdev->sectors; 7207 7208 if (fit && (num_sectors == 0 || num_sectors > avail)) 7209 num_sectors = avail; 7210 if (avail < num_sectors) 7211 return -ENOSPC; 7212 } 7213 rv = mddev->pers->resize(mddev, num_sectors); 7214 if (!rv) { 7215 if (mddev_is_clustered(mddev)) 7216 md_cluster_ops->update_size(mddev, old_dev_sectors); 7217 else if (mddev->queue) { 7218 set_capacity_and_notify(mddev->gendisk, 7219 mddev->array_sectors); 7220 } 7221 } 7222 return rv; 7223 } 7224 7225 static int update_raid_disks(struct mddev *mddev, int raid_disks) 7226 { 7227 int rv; 7228 struct md_rdev *rdev; 7229 /* change the number of raid disks */ 7230 if (mddev->pers->check_reshape == NULL) 7231 return -EINVAL; 7232 if (!md_is_rdwr(mddev)) 7233 return -EROFS; 7234 if (raid_disks <= 0 || 7235 (mddev->max_disks && raid_disks >= mddev->max_disks)) 7236 return -EINVAL; 7237 if (mddev->sync_thread || 7238 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7239 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || 7240 mddev->reshape_position != MaxSector) 7241 return -EBUSY; 7242 7243 rdev_for_each(rdev, mddev) { 7244 if (mddev->raid_disks < raid_disks && 7245 rdev->data_offset < rdev->new_data_offset) 7246 return -EINVAL; 7247 if (mddev->raid_disks > raid_disks && 7248 rdev->data_offset > rdev->new_data_offset) 7249 return -EINVAL; 7250 } 7251 7252 mddev->delta_disks = raid_disks - mddev->raid_disks; 7253 if (mddev->delta_disks < 0) 7254 mddev->reshape_backwards = 1; 7255 else if (mddev->delta_disks > 0) 7256 mddev->reshape_backwards = 0; 7257 7258 rv = mddev->pers->check_reshape(mddev); 7259 if (rv < 0) { 7260 mddev->delta_disks = 0; 7261 mddev->reshape_backwards = 0; 7262 } 7263 return rv; 7264 } 7265 7266 /* 7267 * update_array_info is used to change the configuration of an 7268 * on-line array. 7269 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 7270 * fields in the info are checked against the array. 7271 * Any differences that cannot be handled will cause an error. 7272 * Normally, only one change can be managed at a time. 
7273 */ 7274 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 7275 { 7276 int rv = 0; 7277 int cnt = 0; 7278 int state = 0; 7279 7280 /* calculate expected state,ignoring low bits */ 7281 if (mddev->bitmap && mddev->bitmap_info.offset) 7282 state |= (1 << MD_SB_BITMAP_PRESENT); 7283 7284 if (mddev->major_version != info->major_version || 7285 mddev->minor_version != info->minor_version || 7286 /* mddev->patch_version != info->patch_version || */ 7287 mddev->ctime != info->ctime || 7288 mddev->level != info->level || 7289 /* mddev->layout != info->layout || */ 7290 mddev->persistent != !info->not_persistent || 7291 mddev->chunk_sectors != info->chunk_size >> 9 || 7292 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 7293 ((state^info->state) & 0xfffffe00) 7294 ) 7295 return -EINVAL; 7296 /* Check there is only one change */ 7297 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7298 cnt++; 7299 if (mddev->raid_disks != info->raid_disks) 7300 cnt++; 7301 if (mddev->layout != info->layout) 7302 cnt++; 7303 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 7304 cnt++; 7305 if (cnt == 0) 7306 return 0; 7307 if (cnt > 1) 7308 return -EINVAL; 7309 7310 if (mddev->layout != info->layout) { 7311 /* Change layout 7312 * we don't need to do anything at the md level, the 7313 * personality will take care of it all. 7314 */ 7315 if (mddev->pers->check_reshape == NULL) 7316 return -EINVAL; 7317 else { 7318 mddev->new_layout = info->layout; 7319 rv = mddev->pers->check_reshape(mddev); 7320 if (rv) 7321 mddev->new_layout = mddev->layout; 7322 return rv; 7323 } 7324 } 7325 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7326 rv = update_size(mddev, (sector_t)info->size * 2); 7327 7328 if (mddev->raid_disks != info->raid_disks) 7329 rv = update_raid_disks(mddev, info->raid_disks); 7330 7331 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 7332 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 7333 rv = -EINVAL; 7334 goto err; 7335 } 7336 if (mddev->recovery || mddev->sync_thread) { 7337 rv = -EBUSY; 7338 goto err; 7339 } 7340 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 7341 struct bitmap *bitmap; 7342 /* add the bitmap */ 7343 if (mddev->bitmap) { 7344 rv = -EEXIST; 7345 goto err; 7346 } 7347 if (mddev->bitmap_info.default_offset == 0) { 7348 rv = -EINVAL; 7349 goto err; 7350 } 7351 mddev->bitmap_info.offset = 7352 mddev->bitmap_info.default_offset; 7353 mddev->bitmap_info.space = 7354 mddev->bitmap_info.default_space; 7355 bitmap = md_bitmap_create(mddev, -1); 7356 mddev_suspend(mddev); 7357 if (!IS_ERR(bitmap)) { 7358 mddev->bitmap = bitmap; 7359 rv = md_bitmap_load(mddev); 7360 } else 7361 rv = PTR_ERR(bitmap); 7362 if (rv) 7363 md_bitmap_destroy(mddev); 7364 mddev_resume(mddev); 7365 } else { 7366 /* remove the bitmap */ 7367 if (!mddev->bitmap) { 7368 rv = -ENOENT; 7369 goto err; 7370 } 7371 if (mddev->bitmap->storage.file) { 7372 rv = -EINVAL; 7373 goto err; 7374 } 7375 if (mddev->bitmap_info.nodes) { 7376 /* hold PW on all the bitmap lock */ 7377 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { 7378 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 7379 rv = -EPERM; 7380 md_cluster_ops->unlock_all_bitmaps(mddev); 7381 goto err; 7382 } 7383 7384 mddev->bitmap_info.nodes = 0; 7385 md_cluster_ops->leave(mddev); 7386 module_put(md_cluster_mod); 7387 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 7388 } 7389 mddev_suspend(mddev); 7390 
md_bitmap_destroy(mddev); 7391 mddev_resume(mddev); 7392 mddev->bitmap_info.offset = 0; 7393 } 7394 } 7395 md_update_sb(mddev, 1); 7396 return rv; 7397 err: 7398 return rv; 7399 } 7400 7401 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 7402 { 7403 struct md_rdev *rdev; 7404 int err = 0; 7405 7406 if (mddev->pers == NULL) 7407 return -ENODEV; 7408 7409 rcu_read_lock(); 7410 rdev = md_find_rdev_rcu(mddev, dev); 7411 if (!rdev) 7412 err = -ENODEV; 7413 else { 7414 md_error(mddev, rdev); 7415 if (test_bit(MD_BROKEN, &mddev->flags)) 7416 err = -EBUSY; 7417 } 7418 rcu_read_unlock(); 7419 return err; 7420 } 7421 7422 /* 7423 * We have a problem here : there is no easy way to give a CHS 7424 * virtual geometry. We currently pretend that we have a 2 heads 7425 * 4 sectors (with a BIG number of cylinders...). This drives 7426 * dosfs just mad... ;-) 7427 */ 7428 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 7429 { 7430 struct mddev *mddev = bdev->bd_disk->private_data; 7431 7432 geo->heads = 2; 7433 geo->sectors = 4; 7434 geo->cylinders = mddev->array_sectors / 8; 7435 return 0; 7436 } 7437 7438 static inline bool md_ioctl_valid(unsigned int cmd) 7439 { 7440 switch (cmd) { 7441 case ADD_NEW_DISK: 7442 case GET_ARRAY_INFO: 7443 case GET_BITMAP_FILE: 7444 case GET_DISK_INFO: 7445 case HOT_ADD_DISK: 7446 case HOT_REMOVE_DISK: 7447 case RAID_VERSION: 7448 case RESTART_ARRAY_RW: 7449 case RUN_ARRAY: 7450 case SET_ARRAY_INFO: 7451 case SET_BITMAP_FILE: 7452 case SET_DISK_FAULTY: 7453 case STOP_ARRAY: 7454 case STOP_ARRAY_RO: 7455 case CLUSTERED_DISK_NACK: 7456 return true; 7457 default: 7458 return false; 7459 } 7460 } 7461 7462 static int __md_set_array_info(struct mddev *mddev, void __user *argp) 7463 { 7464 mdu_array_info_t info; 7465 int err; 7466 7467 if (!argp) 7468 memset(&info, 0, sizeof(info)); 7469 else if (copy_from_user(&info, argp, sizeof(info))) 7470 return -EFAULT; 7471 7472 if (mddev->pers) { 7473 err = update_array_info(mddev, &info); 7474 if (err) 7475 pr_warn("md: couldn't update array info. %d\n", err); 7476 return err; 7477 } 7478 7479 if (!list_empty(&mddev->disks)) { 7480 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 7481 return -EBUSY; 7482 } 7483 7484 if (mddev->raid_disks) { 7485 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 7486 return -EBUSY; 7487 } 7488 7489 err = md_set_array_info(mddev, &info); 7490 if (err) 7491 pr_warn("md: couldn't set array info. 
%d\n", err); 7492 7493 return err; 7494 } 7495 7496 static int md_ioctl(struct block_device *bdev, fmode_t mode, 7497 unsigned int cmd, unsigned long arg) 7498 { 7499 int err = 0; 7500 void __user *argp = (void __user *)arg; 7501 struct mddev *mddev = NULL; 7502 bool did_set_md_closing = false; 7503 7504 if (!md_ioctl_valid(cmd)) 7505 return -ENOTTY; 7506 7507 switch (cmd) { 7508 case RAID_VERSION: 7509 case GET_ARRAY_INFO: 7510 case GET_DISK_INFO: 7511 break; 7512 default: 7513 if (!capable(CAP_SYS_ADMIN)) 7514 return -EACCES; 7515 } 7516 7517 /* 7518 * Commands dealing with the RAID driver but not any 7519 * particular array: 7520 */ 7521 switch (cmd) { 7522 case RAID_VERSION: 7523 err = get_version(argp); 7524 goto out; 7525 default:; 7526 } 7527 7528 /* 7529 * Commands creating/starting a new array: 7530 */ 7531 7532 mddev = bdev->bd_disk->private_data; 7533 7534 if (!mddev) { 7535 BUG(); 7536 goto out; 7537 } 7538 7539 /* Some actions do not requires the mutex */ 7540 switch (cmd) { 7541 case GET_ARRAY_INFO: 7542 if (!mddev->raid_disks && !mddev->external) 7543 err = -ENODEV; 7544 else 7545 err = get_array_info(mddev, argp); 7546 goto out; 7547 7548 case GET_DISK_INFO: 7549 if (!mddev->raid_disks && !mddev->external) 7550 err = -ENODEV; 7551 else 7552 err = get_disk_info(mddev, argp); 7553 goto out; 7554 7555 case SET_DISK_FAULTY: 7556 err = set_disk_faulty(mddev, new_decode_dev(arg)); 7557 goto out; 7558 7559 case GET_BITMAP_FILE: 7560 err = get_bitmap_file(mddev, argp); 7561 goto out; 7562 7563 } 7564 7565 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK) 7566 flush_rdev_wq(mddev); 7567 7568 if (cmd == HOT_REMOVE_DISK) 7569 /* need to ensure recovery thread has run */ 7570 wait_event_interruptible_timeout(mddev->sb_wait, 7571 !test_bit(MD_RECOVERY_NEEDED, 7572 &mddev->recovery), 7573 msecs_to_jiffies(5000)); 7574 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 7575 /* Need to flush page cache, and ensure no-one else opens 7576 * and writes 7577 */ 7578 mutex_lock(&mddev->open_mutex); 7579 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 7580 mutex_unlock(&mddev->open_mutex); 7581 err = -EBUSY; 7582 goto out; 7583 } 7584 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { 7585 mutex_unlock(&mddev->open_mutex); 7586 err = -EBUSY; 7587 goto out; 7588 } 7589 did_set_md_closing = true; 7590 mutex_unlock(&mddev->open_mutex); 7591 sync_blockdev(bdev); 7592 } 7593 err = mddev_lock(mddev); 7594 if (err) { 7595 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 7596 err, cmd); 7597 goto out; 7598 } 7599 7600 if (cmd == SET_ARRAY_INFO) { 7601 err = __md_set_array_info(mddev, argp); 7602 goto unlock; 7603 } 7604 7605 /* 7606 * Commands querying/configuring an existing array: 7607 */ 7608 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 7609 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 7610 if ((!mddev->raid_disks && !mddev->external) 7611 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 7612 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 7613 && cmd != GET_BITMAP_FILE) { 7614 err = -ENODEV; 7615 goto unlock; 7616 } 7617 7618 /* 7619 * Commands even a read-only array can execute: 7620 */ 7621 switch (cmd) { 7622 case RESTART_ARRAY_RW: 7623 err = restart_array(mddev); 7624 goto unlock; 7625 7626 case STOP_ARRAY: 7627 err = do_md_stop(mddev, 0, bdev); 7628 goto unlock; 7629 7630 case STOP_ARRAY_RO: 7631 err = md_set_readonly(mddev, bdev); 7632 goto unlock; 7633 7634 case HOT_REMOVE_DISK: 7635 err = hot_remove_disk(mddev, new_decode_dev(arg)); 7636 goto unlock; 
7637 7638 case ADD_NEW_DISK: 7639 /* We can support ADD_NEW_DISK on read-only arrays 7640 * only if we are re-adding a preexisting device. 7641 * So require mddev->pers and MD_DISK_SYNC. 7642 */ 7643 if (mddev->pers) { 7644 mdu_disk_info_t info; 7645 if (copy_from_user(&info, argp, sizeof(info))) 7646 err = -EFAULT; 7647 else if (!(info.state & (1<<MD_DISK_SYNC))) 7648 /* Need to clear read-only for this */ 7649 break; 7650 else 7651 err = md_add_new_disk(mddev, &info); 7652 goto unlock; 7653 } 7654 break; 7655 } 7656 7657 /* 7658 * The remaining ioctls are changing the state of the 7659 * superblock, so we do not allow them on read-only arrays. 7660 */ 7661 if (!md_is_rdwr(mddev) && mddev->pers) { 7662 if (mddev->ro != MD_AUTO_READ) { 7663 err = -EROFS; 7664 goto unlock; 7665 } 7666 mddev->ro = MD_RDWR; 7667 sysfs_notify_dirent_safe(mddev->sysfs_state); 7668 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7669 /* mddev_unlock will wake thread */ 7670 /* If a device failed while we were read-only, we 7671 * need to make sure the metadata is updated now. 7672 */ 7673 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 7674 mddev_unlock(mddev); 7675 wait_event(mddev->sb_wait, 7676 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 7677 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 7678 mddev_lock_nointr(mddev); 7679 } 7680 } 7681 7682 switch (cmd) { 7683 case ADD_NEW_DISK: 7684 { 7685 mdu_disk_info_t info; 7686 if (copy_from_user(&info, argp, sizeof(info))) 7687 err = -EFAULT; 7688 else 7689 err = md_add_new_disk(mddev, &info); 7690 goto unlock; 7691 } 7692 7693 case CLUSTERED_DISK_NACK: 7694 if (mddev_is_clustered(mddev)) 7695 md_cluster_ops->new_disk_ack(mddev, false); 7696 else 7697 err = -EINVAL; 7698 goto unlock; 7699 7700 case HOT_ADD_DISK: 7701 err = hot_add_disk(mddev, new_decode_dev(arg)); 7702 goto unlock; 7703 7704 case RUN_ARRAY: 7705 err = do_md_run(mddev); 7706 goto unlock; 7707 7708 case SET_BITMAP_FILE: 7709 err = set_bitmap_file(mddev, (int)arg); 7710 goto unlock; 7711 7712 default: 7713 err = -EINVAL; 7714 goto unlock; 7715 } 7716 7717 unlock: 7718 if (mddev->hold_active == UNTIL_IOCTL && 7719 err != -EINVAL) 7720 mddev->hold_active = 0; 7721 mddev_unlock(mddev); 7722 out: 7723 if(did_set_md_closing) 7724 clear_bit(MD_CLOSING, &mddev->flags); 7725 return err; 7726 } 7727 #ifdef CONFIG_COMPAT 7728 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 7729 unsigned int cmd, unsigned long arg) 7730 { 7731 switch (cmd) { 7732 case HOT_REMOVE_DISK: 7733 case HOT_ADD_DISK: 7734 case SET_DISK_FAULTY: 7735 case SET_BITMAP_FILE: 7736 /* These take in integer arg, do not convert */ 7737 break; 7738 default: 7739 arg = (unsigned long)compat_ptr(arg); 7740 break; 7741 } 7742 7743 return md_ioctl(bdev, mode, cmd, arg); 7744 } 7745 #endif /* CONFIG_COMPAT */ 7746 7747 static int md_set_read_only(struct block_device *bdev, bool ro) 7748 { 7749 struct mddev *mddev = bdev->bd_disk->private_data; 7750 int err; 7751 7752 err = mddev_lock(mddev); 7753 if (err) 7754 return err; 7755 7756 if (!mddev->raid_disks && !mddev->external) { 7757 err = -ENODEV; 7758 goto out_unlock; 7759 } 7760 7761 /* 7762 * Transitioning to read-auto need only happen for arrays that call 7763 * md_write_start and which are not ready for writes yet. 
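 * (Added note: clearing read-only through this hook therefore leaves the array in MD_AUTO_READ rather than MD_RDWR: it stays read-only until the first write arrives, at which point md_write_start promotes it to read-write.)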
7764 */ 7765 if (!ro && mddev->ro == MD_RDONLY && mddev->pers) { 7766 err = restart_array(mddev); 7767 if (err) 7768 goto out_unlock; 7769 mddev->ro = MD_AUTO_READ; 7770 } 7771 7772 out_unlock: 7773 mddev_unlock(mddev); 7774 return err; 7775 } 7776 7777 static int md_open(struct block_device *bdev, fmode_t mode) 7778 { 7779 struct mddev *mddev; 7780 int err; 7781 7782 spin_lock(&all_mddevs_lock); 7783 mddev = mddev_get(bdev->bd_disk->private_data); 7784 spin_unlock(&all_mddevs_lock); 7785 if (!mddev) 7786 return -ENODEV; 7787 7788 err = mutex_lock_interruptible(&mddev->open_mutex); 7789 if (err) 7790 goto out; 7791 7792 err = -ENODEV; 7793 if (test_bit(MD_CLOSING, &mddev->flags)) 7794 goto out_unlock; 7795 7796 atomic_inc(&mddev->openers); 7797 mutex_unlock(&mddev->open_mutex); 7798 7799 bdev_check_media_change(bdev); 7800 return 0; 7801 7802 out_unlock: 7803 mutex_unlock(&mddev->open_mutex); 7804 out: 7805 mddev_put(mddev); 7806 return err; 7807 } 7808 7809 static void md_release(struct gendisk *disk, fmode_t mode) 7810 { 7811 struct mddev *mddev = disk->private_data; 7812 7813 BUG_ON(!mddev); 7814 atomic_dec(&mddev->openers); 7815 mddev_put(mddev); 7816 } 7817 7818 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) 7819 { 7820 struct mddev *mddev = disk->private_data; 7821 unsigned int ret = 0; 7822 7823 if (mddev->changed) 7824 ret = DISK_EVENT_MEDIA_CHANGE; 7825 mddev->changed = 0; 7826 return ret; 7827 } 7828 7829 static void md_free_disk(struct gendisk *disk) 7830 { 7831 struct mddev *mddev = disk->private_data; 7832 7833 percpu_ref_exit(&mddev->writes_pending); 7834 bioset_exit(&mddev->bio_set); 7835 bioset_exit(&mddev->sync_set); 7836 7837 mddev_free(mddev); 7838 } 7839 7840 const struct block_device_operations md_fops = 7841 { 7842 .owner = THIS_MODULE, 7843 .submit_bio = md_submit_bio, 7844 .open = md_open, 7845 .release = md_release, 7846 .ioctl = md_ioctl, 7847 #ifdef CONFIG_COMPAT 7848 .compat_ioctl = md_compat_ioctl, 7849 #endif 7850 .getgeo = md_getgeo, 7851 .check_events = md_check_events, 7852 .set_read_only = md_set_read_only, 7853 .free_disk = md_free_disk, 7854 }; 7855 7856 static int md_thread(void *arg) 7857 { 7858 struct md_thread *thread = arg; 7859 7860 /* 7861 * md_thread is a 'system-thread', it's priority should be very 7862 * high. We avoid resource deadlocks individually in each 7863 * raid personality. (RAID5 does preallocation) We also use RR and 7864 * the very same RT priority as kswapd, thus we will never get 7865 * into a priority inversion deadlock. 7866 * 7867 * we definitely have to have equal or higher priority than 7868 * bdflush, otherwise bdflush will deadlock if there are too 7869 * many dirty RAID5 blocks. 7870 */ 7871 7872 allow_signal(SIGKILL); 7873 while (!kthread_should_stop()) { 7874 7875 /* We need to wait INTERRUPTIBLE so that 7876 * we don't add to the load-average. 
7877 * That means we need to be sure no signals are 7878 * pending 7879 */ 7880 if (signal_pending(current)) 7881 flush_signals(current); 7882 7883 wait_event_interruptible_timeout 7884 (thread->wqueue, 7885 test_bit(THREAD_WAKEUP, &thread->flags) 7886 || kthread_should_stop() || kthread_should_park(), 7887 thread->timeout); 7888 7889 clear_bit(THREAD_WAKEUP, &thread->flags); 7890 if (kthread_should_park()) 7891 kthread_parkme(); 7892 if (!kthread_should_stop()) 7893 thread->run(thread); 7894 } 7895 7896 return 0; 7897 } 7898 7899 void md_wakeup_thread(struct md_thread *thread) 7900 { 7901 if (thread) { 7902 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7903 set_bit(THREAD_WAKEUP, &thread->flags); 7904 wake_up(&thread->wqueue); 7905 } 7906 } 7907 EXPORT_SYMBOL(md_wakeup_thread); 7908 7909 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7910 struct mddev *mddev, const char *name) 7911 { 7912 struct md_thread *thread; 7913 7914 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7915 if (!thread) 7916 return NULL; 7917 7918 init_waitqueue_head(&thread->wqueue); 7919 7920 thread->run = run; 7921 thread->mddev = mddev; 7922 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7923 thread->tsk = kthread_run(md_thread, thread, 7924 "%s_%s", 7925 mdname(thread->mddev), 7926 name); 7927 if (IS_ERR(thread->tsk)) { 7928 kfree(thread); 7929 return NULL; 7930 } 7931 return thread; 7932 } 7933 EXPORT_SYMBOL(md_register_thread); 7934 7935 void md_unregister_thread(struct md_thread **threadp) 7936 { 7937 struct md_thread *thread; 7938 7939 /* 7940 * Locking ensures that mddev_unlock does not wake_up a 7941 * non-existent thread 7942 */ 7943 spin_lock(&pers_lock); 7944 thread = *threadp; 7945 if (!thread) { 7946 spin_unlock(&pers_lock); 7947 return; 7948 } 7949 *threadp = NULL; 7950 spin_unlock(&pers_lock); 7951 7952 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7953 kthread_stop(thread->tsk); 7954 kfree(thread); 7955 } 7956 EXPORT_SYMBOL(md_unregister_thread); 7957 7958 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7959 { 7960 if (!rdev || test_bit(Faulty, &rdev->flags)) 7961 return; 7962 7963 if (!mddev->pers || !mddev->pers->error_handler) 7964 return; 7965 mddev->pers->error_handler(mddev, rdev); 7966 7967 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) 7968 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7969 sysfs_notify_dirent_safe(rdev->sysfs_state); 7970 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7971 if (!test_bit(MD_BROKEN, &mddev->flags)) { 7972 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7973 md_wakeup_thread(mddev->thread); 7974 } 7975 if (mddev->event_work.func) 7976 queue_work(md_misc_wq, &mddev->event_work); 7977 md_new_event(); 7978 } 7979 EXPORT_SYMBOL(md_error); 7980 7981 /* seq_file implementation /proc/mdstat */ 7982 7983 static void status_unused(struct seq_file *seq) 7984 { 7985 int i = 0; 7986 struct md_rdev *rdev; 7987 7988 seq_printf(seq, "unused devices: "); 7989 7990 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7991 i++; 7992 seq_printf(seq, "%pg ", rdev->bdev); 7993 } 7994 if (!i) 7995 seq_printf(seq, "<none>"); 7996 7997 seq_printf(seq, "\n"); 7998 } 7999 8000 static int status_resync(struct seq_file *seq, struct mddev *mddev) 8001 { 8002 sector_t max_sectors, resync, res; 8003 unsigned long dt, db = 0; 8004 sector_t rt, curr_mark_cnt, resync_mark_cnt; 8005 int scale, recovery_active; 8006 unsigned int per_milli; 8007 8008 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8009 
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8010 max_sectors = mddev->resync_max_sectors; 8011 else 8012 max_sectors = mddev->dev_sectors; 8013 8014 resync = mddev->curr_resync; 8015 if (resync < MD_RESYNC_ACTIVE) { 8016 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 8017 /* Still cleaning up */ 8018 resync = max_sectors; 8019 } else if (resync > max_sectors) { 8020 resync = max_sectors; 8021 } else { 8022 resync -= atomic_read(&mddev->recovery_active); 8023 if (resync < MD_RESYNC_ACTIVE) { 8024 /* 8025 * Resync has started, but the subtraction has 8026 * yielded one of the special values. Force it 8027 * to active to ensure the status reports an 8028 * active resync. 8029 */ 8030 resync = MD_RESYNC_ACTIVE; 8031 } 8032 } 8033 8034 if (resync == MD_RESYNC_NONE) { 8035 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 8036 struct md_rdev *rdev; 8037 8038 rdev_for_each(rdev, mddev) 8039 if (rdev->raid_disk >= 0 && 8040 !test_bit(Faulty, &rdev->flags) && 8041 rdev->recovery_offset != MaxSector && 8042 rdev->recovery_offset) { 8043 seq_printf(seq, "\trecover=REMOTE"); 8044 return 1; 8045 } 8046 if (mddev->reshape_position != MaxSector) 8047 seq_printf(seq, "\treshape=REMOTE"); 8048 else 8049 seq_printf(seq, "\tresync=REMOTE"); 8050 return 1; 8051 } 8052 if (mddev->recovery_cp < MaxSector) { 8053 seq_printf(seq, "\tresync=PENDING"); 8054 return 1; 8055 } 8056 return 0; 8057 } 8058 if (resync < MD_RESYNC_ACTIVE) { 8059 seq_printf(seq, "\tresync=DELAYED"); 8060 return 1; 8061 } 8062 8063 WARN_ON(max_sectors == 0); 8064 /* Pick 'scale' such that (resync>>scale)*1000 will fit 8065 * in a sector_t, and (max_sectors>>scale) will fit in a 8066 * u32, as those are the requirements for sector_div. 8067 * Thus 'scale' must be at least 10 8068 */ 8069 scale = 10; 8070 if (sizeof(sector_t) > sizeof(unsigned long)) { 8071 while ( max_sectors/2 > (1ULL<<(scale+32))) 8072 scale++; 8073 } 8074 res = (resync>>scale)*1000; 8075 sector_div(res, (u32)((max_sectors>>scale)+1)); 8076 8077 per_milli = res; 8078 { 8079 int i, x = per_milli/50, y = 20-x; 8080 seq_printf(seq, "["); 8081 for (i = 0; i < x; i++) 8082 seq_printf(seq, "="); 8083 seq_printf(seq, ">"); 8084 for (i = 0; i < y; i++) 8085 seq_printf(seq, "."); 8086 seq_printf(seq, "] "); 8087 } 8088 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 8089 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 8090 "reshape" : 8091 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 8092 "check" : 8093 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 8094 "resync" : "recovery"))), 8095 per_milli/10, per_milli % 10, 8096 (unsigned long long) resync/2, 8097 (unsigned long long) max_sectors/2); 8098 8099 /* 8100 * dt: time from mark until now 8101 * db: blocks written from mark until now 8102 * rt: remaining time 8103 * 8104 * rt is a sector_t, which is always 64bit now. We are keeping 8105 * the original algorithm, but it is not really necessary. 8106 * 8107 * Original algorithm: 8108 * So we divide before multiply in case it is 32bit and close 8109 * to the limit. 8110 * We scale the divisor (db) by 32 to avoid losing precision 8111 * near the end of resync when the number of remaining sectors 8112 * is close to 'db'. 8113 * We then divide rt by 32 after multiplying by db to compensate. 8114 * The '+1' avoids division by zero if db is very small. 
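 *
 * A rough worked example (numbers are made up): with dt = 30 seconds
 * since the mark and db = 61440 sectors (30MB) written in that time,
 * 1843200 sectors still to go gives
 *
 *	rt = ((1843200 / (61440/32 + 1)) * 30) >> 5 = 899 seconds,
 *
 * printed as "finish=14.9min" (the exact value would be 900s), while
 * the reported speed is db/2/dt = 1024K/sec.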
8115 */ 8116 dt = ((jiffies - mddev->resync_mark) / HZ); 8117 if (!dt) dt++; 8118 8119 curr_mark_cnt = mddev->curr_mark_cnt; 8120 recovery_active = atomic_read(&mddev->recovery_active); 8121 resync_mark_cnt = mddev->resync_mark_cnt; 8122 8123 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) 8124 db = curr_mark_cnt - (recovery_active + resync_mark_cnt); 8125 8126 rt = max_sectors - resync; /* number of remaining sectors */ 8127 rt = div64_u64(rt, db/32+1); 8128 rt *= dt; 8129 rt >>= 5; 8130 8131 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 8132 ((unsigned long)rt % 60)/6); 8133 8134 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 8135 return 1; 8136 } 8137 8138 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 8139 { 8140 struct list_head *tmp; 8141 loff_t l = *pos; 8142 struct mddev *mddev; 8143 8144 if (l == 0x10000) { 8145 ++*pos; 8146 return (void *)2; 8147 } 8148 if (l > 0x10000) 8149 return NULL; 8150 if (!l--) 8151 /* header */ 8152 return (void*)1; 8153 8154 spin_lock(&all_mddevs_lock); 8155 list_for_each(tmp,&all_mddevs) 8156 if (!l--) { 8157 mddev = list_entry(tmp, struct mddev, all_mddevs); 8158 if (!mddev_get(mddev)) 8159 continue; 8160 spin_unlock(&all_mddevs_lock); 8161 return mddev; 8162 } 8163 spin_unlock(&all_mddevs_lock); 8164 if (!l--) 8165 return (void*)2;/* tail */ 8166 return NULL; 8167 } 8168 8169 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 8170 { 8171 struct list_head *tmp; 8172 struct mddev *next_mddev, *mddev = v; 8173 struct mddev *to_put = NULL; 8174 8175 ++*pos; 8176 if (v == (void*)2) 8177 return NULL; 8178 8179 spin_lock(&all_mddevs_lock); 8180 if (v == (void*)1) { 8181 tmp = all_mddevs.next; 8182 } else { 8183 to_put = mddev; 8184 tmp = mddev->all_mddevs.next; 8185 } 8186 8187 for (;;) { 8188 if (tmp == &all_mddevs) { 8189 next_mddev = (void*)2; 8190 *pos = 0x10000; 8191 break; 8192 } 8193 next_mddev = list_entry(tmp, struct mddev, all_mddevs); 8194 if (mddev_get(next_mddev)) 8195 break; 8196 mddev = next_mddev; 8197 tmp = mddev->all_mddevs.next; 8198 } 8199 spin_unlock(&all_mddevs_lock); 8200 8201 if (to_put) 8202 mddev_put(mddev); 8203 return next_mddev; 8204 8205 } 8206 8207 static void md_seq_stop(struct seq_file *seq, void *v) 8208 { 8209 struct mddev *mddev = v; 8210 8211 if (mddev && v != (void*)1 && v != (void*)2) 8212 mddev_put(mddev); 8213 } 8214 8215 static int md_seq_show(struct seq_file *seq, void *v) 8216 { 8217 struct mddev *mddev = v; 8218 sector_t sectors; 8219 struct md_rdev *rdev; 8220 8221 if (v == (void*)1) { 8222 struct md_personality *pers; 8223 seq_printf(seq, "Personalities : "); 8224 spin_lock(&pers_lock); 8225 list_for_each_entry(pers, &pers_list, list) 8226 seq_printf(seq, "[%s] ", pers->name); 8227 8228 spin_unlock(&pers_lock); 8229 seq_printf(seq, "\n"); 8230 seq->poll_event = atomic_read(&md_event_count); 8231 return 0; 8232 } 8233 if (v == (void*)2) { 8234 status_unused(seq); 8235 return 0; 8236 } 8237 8238 spin_lock(&mddev->lock); 8239 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 8240 seq_printf(seq, "%s : %sactive", mdname(mddev), 8241 mddev->pers ? 
"" : "in"); 8242 if (mddev->pers) { 8243 if (mddev->ro == MD_RDONLY) 8244 seq_printf(seq, " (read-only)"); 8245 if (mddev->ro == MD_AUTO_READ) 8246 seq_printf(seq, " (auto-read-only)"); 8247 seq_printf(seq, " %s", mddev->pers->name); 8248 } 8249 8250 sectors = 0; 8251 rcu_read_lock(); 8252 rdev_for_each_rcu(rdev, mddev) { 8253 seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr); 8254 8255 if (test_bit(WriteMostly, &rdev->flags)) 8256 seq_printf(seq, "(W)"); 8257 if (test_bit(Journal, &rdev->flags)) 8258 seq_printf(seq, "(J)"); 8259 if (test_bit(Faulty, &rdev->flags)) { 8260 seq_printf(seq, "(F)"); 8261 continue; 8262 } 8263 if (rdev->raid_disk < 0) 8264 seq_printf(seq, "(S)"); /* spare */ 8265 if (test_bit(Replacement, &rdev->flags)) 8266 seq_printf(seq, "(R)"); 8267 sectors += rdev->sectors; 8268 } 8269 rcu_read_unlock(); 8270 8271 if (!list_empty(&mddev->disks)) { 8272 if (mddev->pers) 8273 seq_printf(seq, "\n %llu blocks", 8274 (unsigned long long) 8275 mddev->array_sectors / 2); 8276 else 8277 seq_printf(seq, "\n %llu blocks", 8278 (unsigned long long)sectors / 2); 8279 } 8280 if (mddev->persistent) { 8281 if (mddev->major_version != 0 || 8282 mddev->minor_version != 90) { 8283 seq_printf(seq," super %d.%d", 8284 mddev->major_version, 8285 mddev->minor_version); 8286 } 8287 } else if (mddev->external) 8288 seq_printf(seq, " super external:%s", 8289 mddev->metadata_type); 8290 else 8291 seq_printf(seq, " super non-persistent"); 8292 8293 if (mddev->pers) { 8294 mddev->pers->status(seq, mddev); 8295 seq_printf(seq, "\n "); 8296 if (mddev->pers->sync_request) { 8297 if (status_resync(seq, mddev)) 8298 seq_printf(seq, "\n "); 8299 } 8300 } else 8301 seq_printf(seq, "\n "); 8302 8303 md_bitmap_status(seq, mddev->bitmap); 8304 8305 seq_printf(seq, "\n"); 8306 } 8307 spin_unlock(&mddev->lock); 8308 8309 return 0; 8310 } 8311 8312 static const struct seq_operations md_seq_ops = { 8313 .start = md_seq_start, 8314 .next = md_seq_next, 8315 .stop = md_seq_stop, 8316 .show = md_seq_show, 8317 }; 8318 8319 static int md_seq_open(struct inode *inode, struct file *file) 8320 { 8321 struct seq_file *seq; 8322 int error; 8323 8324 error = seq_open(file, &md_seq_ops); 8325 if (error) 8326 return error; 8327 8328 seq = file->private_data; 8329 seq->poll_event = atomic_read(&md_event_count); 8330 return error; 8331 } 8332 8333 static int md_unloading; 8334 static __poll_t mdstat_poll(struct file *filp, poll_table *wait) 8335 { 8336 struct seq_file *seq = filp->private_data; 8337 __poll_t mask; 8338 8339 if (md_unloading) 8340 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 8341 poll_wait(filp, &md_event_waiters, wait); 8342 8343 /* always allow read */ 8344 mask = EPOLLIN | EPOLLRDNORM; 8345 8346 if (seq->poll_event != atomic_read(&md_event_count)) 8347 mask |= EPOLLERR | EPOLLPRI; 8348 return mask; 8349 } 8350 8351 static const struct proc_ops mdstat_proc_ops = { 8352 .proc_open = md_seq_open, 8353 .proc_read = seq_read, 8354 .proc_lseek = seq_lseek, 8355 .proc_release = seq_release, 8356 .proc_poll = mdstat_poll, 8357 }; 8358 8359 int register_md_personality(struct md_personality *p) 8360 { 8361 pr_debug("md: %s personality registered for level %d\n", 8362 p->name, p->level); 8363 spin_lock(&pers_lock); 8364 list_add_tail(&p->list, &pers_list); 8365 spin_unlock(&pers_lock); 8366 return 0; 8367 } 8368 EXPORT_SYMBOL(register_md_personality); 8369 8370 int unregister_md_personality(struct md_personality *p) 8371 { 8372 pr_debug("md: %s personality unregistered\n", p->name); 8373 spin_lock(&pers_lock); 
8374 list_del_init(&p->list); 8375 spin_unlock(&pers_lock); 8376 return 0; 8377 } 8378 EXPORT_SYMBOL(unregister_md_personality); 8379 8380 int register_md_cluster_operations(struct md_cluster_operations *ops, 8381 struct module *module) 8382 { 8383 int ret = 0; 8384 spin_lock(&pers_lock); 8385 if (md_cluster_ops != NULL) 8386 ret = -EALREADY; 8387 else { 8388 md_cluster_ops = ops; 8389 md_cluster_mod = module; 8390 } 8391 spin_unlock(&pers_lock); 8392 return ret; 8393 } 8394 EXPORT_SYMBOL(register_md_cluster_operations); 8395 8396 int unregister_md_cluster_operations(void) 8397 { 8398 spin_lock(&pers_lock); 8399 md_cluster_ops = NULL; 8400 spin_unlock(&pers_lock); 8401 return 0; 8402 } 8403 EXPORT_SYMBOL(unregister_md_cluster_operations); 8404 8405 int md_setup_cluster(struct mddev *mddev, int nodes) 8406 { 8407 int ret; 8408 if (!md_cluster_ops) 8409 request_module("md-cluster"); 8410 spin_lock(&pers_lock); 8411 /* ensure module won't be unloaded */ 8412 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 8413 pr_warn("can't find md-cluster module or get its reference.\n"); 8414 spin_unlock(&pers_lock); 8415 return -ENOENT; 8416 } 8417 spin_unlock(&pers_lock); 8418 8419 ret = md_cluster_ops->join(mddev, nodes); 8420 if (!ret) 8421 mddev->safemode_delay = 0; 8422 return ret; 8423 } 8424 8425 void md_cluster_stop(struct mddev *mddev) 8426 { 8427 if (!md_cluster_ops) 8428 return; 8429 md_cluster_ops->leave(mddev); 8430 module_put(md_cluster_mod); 8431 } 8432 8433 static int is_mddev_idle(struct mddev *mddev, int init) 8434 { 8435 struct md_rdev *rdev; 8436 int idle; 8437 int curr_events; 8438 8439 idle = 1; 8440 rcu_read_lock(); 8441 rdev_for_each_rcu(rdev, mddev) { 8442 struct gendisk *disk = rdev->bdev->bd_disk; 8443 curr_events = (int)part_stat_read_accum(disk->part0, sectors) - 8444 atomic_read(&disk->sync_io); 8445 /* sync IO will cause sync_io to increase before the disk_stats 8446 * as sync_io is counted when a request starts, and 8447 * disk_stats is counted when it completes. 8448 * So resync activity will cause curr_events to be smaller than 8449 * when there was no such activity. 8450 * non-sync IO will cause disk_stat to increase without 8451 * increasing sync_io so curr_events will (eventually) 8452 * be larger than it was before. Once it becomes 8453 * substantially larger, the test below will cause 8454 * the array to appear non-idle, and resync will slow 8455 * down. 8456 * If there is a lot of outstanding resync activity when 8457 * we set last_event to curr_events, then all that activity 8458 * completing might cause the array to appear non-idle 8459 * and resync will be slowed down even though there might 8460 * not have been non-resync activity. This will only 8461 * happen once though. 'last_events' will soon reflect 8462 * the state where there is little or no outstanding 8463 * resync requests, and further resync activity will 8464 * always make curr_events less than last_events. 
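 *
 * Worked example (hypothetical numbers): if, since the previous check,
 * a member device completed 10240 sectors of resync I/O (counted in
 * both the part stats and sync_io, so the two roughly cancel) plus 512
 * sectors of ordinary filesystem I/O (counted only in the part stats),
 * curr_events ends up about 512 above last_events.  That exceeds the
 * 64-sector slack below, so the array is reported non-idle and
 * md_do_sync() throttles back towards speed_min().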
8465 * 8466 */ 8467 if (init || curr_events - rdev->last_events > 64) { 8468 rdev->last_events = curr_events; 8469 idle = 0; 8470 } 8471 } 8472 rcu_read_unlock(); 8473 return idle; 8474 } 8475 8476 void md_done_sync(struct mddev *mddev, int blocks, int ok) 8477 { 8478 /* another "blocks" (512byte) blocks have been synced */ 8479 atomic_sub(blocks, &mddev->recovery_active); 8480 wake_up(&mddev->recovery_wait); 8481 if (!ok) { 8482 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8483 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 8484 md_wakeup_thread(mddev->thread); 8485 // stop recovery, signal do_sync .... 8486 } 8487 } 8488 EXPORT_SYMBOL(md_done_sync); 8489 8490 /* md_write_start(mddev, bi) 8491 * If we need to update some array metadata (e.g. 'active' flag 8492 * in superblock) before writing, schedule a superblock update 8493 * and wait for it to complete. 8494 * A return value of 'false' means that the write wasn't recorded 8495 * and cannot proceed as the array is being suspend. 8496 */ 8497 bool md_write_start(struct mddev *mddev, struct bio *bi) 8498 { 8499 int did_change = 0; 8500 8501 if (bio_data_dir(bi) != WRITE) 8502 return true; 8503 8504 BUG_ON(mddev->ro == MD_RDONLY); 8505 if (mddev->ro == MD_AUTO_READ) { 8506 /* need to switch to read/write */ 8507 mddev->ro = MD_RDWR; 8508 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8509 md_wakeup_thread(mddev->thread); 8510 md_wakeup_thread(mddev->sync_thread); 8511 did_change = 1; 8512 } 8513 rcu_read_lock(); 8514 percpu_ref_get(&mddev->writes_pending); 8515 smp_mb(); /* Match smp_mb in set_in_sync() */ 8516 if (mddev->safemode == 1) 8517 mddev->safemode = 0; 8518 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ 8519 if (mddev->in_sync || mddev->sync_checkers) { 8520 spin_lock(&mddev->lock); 8521 if (mddev->in_sync) { 8522 mddev->in_sync = 0; 8523 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8524 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8525 md_wakeup_thread(mddev->thread); 8526 did_change = 1; 8527 } 8528 spin_unlock(&mddev->lock); 8529 } 8530 rcu_read_unlock(); 8531 if (did_change) 8532 sysfs_notify_dirent_safe(mddev->sysfs_state); 8533 if (!mddev->has_superblocks) 8534 return true; 8535 wait_event(mddev->sb_wait, 8536 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || 8537 mddev->suspended); 8538 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 8539 percpu_ref_put(&mddev->writes_pending); 8540 return false; 8541 } 8542 return true; 8543 } 8544 EXPORT_SYMBOL(md_write_start); 8545 8546 /* md_write_inc can only be called when md_write_start() has 8547 * already been called at least once of the current request. 8548 * It increments the counter and is useful when a single request 8549 * is split into several parts. Each part causes an increment and 8550 * so needs a matching md_write_end(). 8551 * Unlike md_write_start(), it is safe to call md_write_inc() inside 8552 * a spinlocked region. 
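 *
 * A minimal usage sketch (generic, not lifted from any one personality;
 * conf->device_lock stands in for whatever lock the caller holds):
 *
 *	if (!md_write_start(mddev, bio))
 *		return false;			/* array is suspending */
 *	spin_lock_irq(&conf->device_lock);
 *	md_write_inc(mddev, bio);		/* extra ref for a split part */
 *	spin_unlock_irq(&conf->device_lock);
 *	...
 *	md_write_end(mddev);			/* one per reference taken */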
8553 */ 8554 void md_write_inc(struct mddev *mddev, struct bio *bi) 8555 { 8556 if (bio_data_dir(bi) != WRITE) 8557 return; 8558 WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev)); 8559 percpu_ref_get(&mddev->writes_pending); 8560 } 8561 EXPORT_SYMBOL(md_write_inc); 8562 8563 void md_write_end(struct mddev *mddev) 8564 { 8565 percpu_ref_put(&mddev->writes_pending); 8566 8567 if (mddev->safemode == 2) 8568 md_wakeup_thread(mddev->thread); 8569 else if (mddev->safemode_delay) 8570 /* The roundup() ensures this only performs locking once 8571 * every ->safemode_delay jiffies 8572 */ 8573 mod_timer(&mddev->safemode_timer, 8574 roundup(jiffies, mddev->safemode_delay) + 8575 mddev->safemode_delay); 8576 } 8577 8578 EXPORT_SYMBOL(md_write_end); 8579 8580 /* This is used by raid0 and raid10 */ 8581 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, 8582 struct bio *bio, sector_t start, sector_t size) 8583 { 8584 struct bio *discard_bio = NULL; 8585 8586 if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 8587 &discard_bio) || !discard_bio) 8588 return; 8589 8590 bio_chain(discard_bio, bio); 8591 bio_clone_blkg_association(discard_bio, bio); 8592 if (mddev->gendisk) 8593 trace_block_bio_remap(discard_bio, 8594 disk_devt(mddev->gendisk), 8595 bio->bi_iter.bi_sector); 8596 submit_bio_noacct(discard_bio); 8597 } 8598 EXPORT_SYMBOL_GPL(md_submit_discard_bio); 8599 8600 int acct_bioset_init(struct mddev *mddev) 8601 { 8602 int err = 0; 8603 8604 if (!bioset_initialized(&mddev->io_acct_set)) 8605 err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE, 8606 offsetof(struct md_io_acct, bio_clone), 0); 8607 return err; 8608 } 8609 EXPORT_SYMBOL_GPL(acct_bioset_init); 8610 8611 void acct_bioset_exit(struct mddev *mddev) 8612 { 8613 bioset_exit(&mddev->io_acct_set); 8614 } 8615 EXPORT_SYMBOL_GPL(acct_bioset_exit); 8616 8617 static void md_end_io_acct(struct bio *bio) 8618 { 8619 struct md_io_acct *md_io_acct = bio->bi_private; 8620 struct bio *orig_bio = md_io_acct->orig_bio; 8621 8622 orig_bio->bi_status = bio->bi_status; 8623 8624 bio_end_io_acct(orig_bio, md_io_acct->start_time); 8625 bio_put(bio); 8626 bio_endio(orig_bio); 8627 } 8628 8629 /* 8630 * Used by personalities that don't already clone the bio and thus can't 8631 * easily add the timestamp to their extended bio structure. 8632 */ 8633 void md_account_bio(struct mddev *mddev, struct bio **bio) 8634 { 8635 struct block_device *bdev = (*bio)->bi_bdev; 8636 struct md_io_acct *md_io_acct; 8637 struct bio *clone; 8638 8639 if (!blk_queue_io_stat(bdev->bd_disk->queue)) 8640 return; 8641 8642 clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set); 8643 md_io_acct = container_of(clone, struct md_io_acct, bio_clone); 8644 md_io_acct->orig_bio = *bio; 8645 md_io_acct->start_time = bio_start_io_acct(*bio); 8646 8647 clone->bi_end_io = md_end_io_acct; 8648 clone->bi_private = md_io_acct; 8649 *bio = clone; 8650 } 8651 EXPORT_SYMBOL_GPL(md_account_bio); 8652 8653 /* md_allow_write(mddev) 8654 * Calling this ensures that the array is marked 'active' so that writes 8655 * may proceed without blocking. It is important to call this before 8656 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8657 * Must be called with mddev_lock held. 
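 *
 * The reason is that a GFP_KERNEL allocation may block on writeback to
 * this very array; those writes would in turn wait for the
 * 'clean'->'active' superblock transition, which needs the lock the
 * caller is holding.  Typical call pattern (sketch):
 *
 *	mddev_lock(mddev);
 *	md_allow_write(mddev);			/* mark the array active first */
 *	new = kzalloc(size, GFP_KERNEL);	/* now safe to block here */
 *	...
 *	mddev_unlock(mddev);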
8658 */ 8659 void md_allow_write(struct mddev *mddev) 8660 { 8661 if (!mddev->pers) 8662 return; 8663 if (!md_is_rdwr(mddev)) 8664 return; 8665 if (!mddev->pers->sync_request) 8666 return; 8667 8668 spin_lock(&mddev->lock); 8669 if (mddev->in_sync) { 8670 mddev->in_sync = 0; 8671 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8672 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8673 if (mddev->safemode_delay && 8674 mddev->safemode == 0) 8675 mddev->safemode = 1; 8676 spin_unlock(&mddev->lock); 8677 md_update_sb(mddev, 0); 8678 sysfs_notify_dirent_safe(mddev->sysfs_state); 8679 /* wait for the dirty state to be recorded in the metadata */ 8680 wait_event(mddev->sb_wait, 8681 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 8682 } else 8683 spin_unlock(&mddev->lock); 8684 } 8685 EXPORT_SYMBOL_GPL(md_allow_write); 8686 8687 #define SYNC_MARKS 10 8688 #define SYNC_MARK_STEP (3*HZ) 8689 #define UPDATE_FREQUENCY (5*60*HZ) 8690 void md_do_sync(struct md_thread *thread) 8691 { 8692 struct mddev *mddev = thread->mddev; 8693 struct mddev *mddev2; 8694 unsigned int currspeed = 0, window; 8695 sector_t max_sectors,j, io_sectors, recovery_done; 8696 unsigned long mark[SYNC_MARKS]; 8697 unsigned long update_time; 8698 sector_t mark_cnt[SYNC_MARKS]; 8699 int last_mark,m; 8700 sector_t last_check; 8701 int skipped = 0; 8702 struct md_rdev *rdev; 8703 char *desc, *action = NULL; 8704 struct blk_plug plug; 8705 int ret; 8706 8707 /* just incase thread restarts... */ 8708 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8709 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) 8710 return; 8711 if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */ 8712 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8713 return; 8714 } 8715 8716 if (mddev_is_clustered(mddev)) { 8717 ret = md_cluster_ops->resync_start(mddev); 8718 if (ret) 8719 goto skip; 8720 8721 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); 8722 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8723 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || 8724 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 8725 && ((unsigned long long)mddev->curr_resync_completed 8726 < (unsigned long long)mddev->resync_max_sectors)) 8727 goto skip; 8728 } 8729 8730 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8731 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 8732 desc = "data-check"; 8733 action = "check"; 8734 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8735 desc = "requested-resync"; 8736 action = "repair"; 8737 } else 8738 desc = "resync"; 8739 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8740 desc = "reshape"; 8741 else 8742 desc = "recovery"; 8743 8744 mddev->last_sync_action = action ?: desc; 8745 8746 /* 8747 * Before starting a resync we must have set curr_resync to 8748 * 2, and then checked that every "conflicting" array has curr_resync 8749 * less than ours. When we find one that is the same or higher 8750 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 8751 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 8752 * This will mean we have to start checking from the beginning again. 
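 *
 * Roughly, for two arrays that share a device (hypothetical md0/md1,
 * with md0 at the lower address): both start at MD_RESYNC_DELAYED;
 * md0 drops itself to MD_RESYNC_YIELDED, wakes resync_wait and then
 * sleeps because md1's value is >= its own; md1 sees md0's lower value,
 * does not wait, and resyncs first.  When md1 finishes it wakes
 * resync_wait and md0 starts checking again from the top.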
8753 * 8754 */ 8755 8756 do { 8757 int mddev2_minor = -1; 8758 mddev->curr_resync = MD_RESYNC_DELAYED; 8759 8760 try_again: 8761 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8762 goto skip; 8763 spin_lock(&all_mddevs_lock); 8764 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) { 8765 if (test_bit(MD_DELETED, &mddev2->flags)) 8766 continue; 8767 if (mddev2 == mddev) 8768 continue; 8769 if (!mddev->parallel_resync 8770 && mddev2->curr_resync 8771 && match_mddev_units(mddev, mddev2)) { 8772 DEFINE_WAIT(wq); 8773 if (mddev < mddev2 && 8774 mddev->curr_resync == MD_RESYNC_DELAYED) { 8775 /* arbitrarily yield */ 8776 mddev->curr_resync = MD_RESYNC_YIELDED; 8777 wake_up(&resync_wait); 8778 } 8779 if (mddev > mddev2 && 8780 mddev->curr_resync == MD_RESYNC_YIELDED) 8781 /* no need to wait here, we can wait the next 8782 * time 'round when curr_resync == 2 8783 */ 8784 continue; 8785 /* We need to wait 'interruptible' so as not to 8786 * contribute to the load average, and not to 8787 * be caught by 'softlockup' 8788 */ 8789 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 8790 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8791 mddev2->curr_resync >= mddev->curr_resync) { 8792 if (mddev2_minor != mddev2->md_minor) { 8793 mddev2_minor = mddev2->md_minor; 8794 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n", 8795 desc, mdname(mddev), 8796 mdname(mddev2)); 8797 } 8798 spin_unlock(&all_mddevs_lock); 8799 8800 if (signal_pending(current)) 8801 flush_signals(current); 8802 schedule(); 8803 finish_wait(&resync_wait, &wq); 8804 goto try_again; 8805 } 8806 finish_wait(&resync_wait, &wq); 8807 } 8808 } 8809 spin_unlock(&all_mddevs_lock); 8810 } while (mddev->curr_resync < MD_RESYNC_DELAYED); 8811 8812 j = 0; 8813 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8814 /* resync follows the size requested by the personality, 8815 * which defaults to physical size, but can be virtual size 8816 */ 8817 max_sectors = mddev->resync_max_sectors; 8818 atomic64_set(&mddev->resync_mismatches, 0); 8819 /* we don't use the checkpoint if there's a bitmap */ 8820 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8821 j = mddev->resync_min; 8822 else if (!mddev->bitmap) 8823 j = mddev->recovery_cp; 8824 8825 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 8826 max_sectors = mddev->resync_max_sectors; 8827 /* 8828 * If the original node aborts reshaping then we continue the 8829 * reshaping, so set j again to avoid restart reshape from the 8830 * first beginning 8831 */ 8832 if (mddev_is_clustered(mddev) && 8833 mddev->reshape_position != MaxSector) 8834 j = mddev->reshape_position; 8835 } else { 8836 /* recovery follows the physical size of devices */ 8837 max_sectors = mddev->dev_sectors; 8838 j = MaxSector; 8839 rcu_read_lock(); 8840 rdev_for_each_rcu(rdev, mddev) 8841 if (rdev->raid_disk >= 0 && 8842 !test_bit(Journal, &rdev->flags) && 8843 !test_bit(Faulty, &rdev->flags) && 8844 !test_bit(In_sync, &rdev->flags) && 8845 rdev->recovery_offset < j) 8846 j = rdev->recovery_offset; 8847 rcu_read_unlock(); 8848 8849 /* If there is a bitmap, we need to make sure all 8850 * writes that started before we added a spare 8851 * complete before we start doing a recovery. 8852 * Otherwise the write might complete and (via 8853 * bitmap_endwrite) set a bit in the bitmap after the 8854 * recovery has checked that bit and skipped that 8855 * region. 
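 *
 * The back-to-back quiesce(1)/quiesce(0) below is not a no-op: the
 * quiesce(1) call blocks new requests and waits for everything already
 * in flight (including its bitmap accounting) to complete, and
 * quiesce(0) then lets normal I/O resume, so recovery starts with a
 * stable view of the bitmap.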
8856 */ 8857 if (mddev->bitmap) { 8858 mddev->pers->quiesce(mddev, 1); 8859 mddev->pers->quiesce(mddev, 0); 8860 } 8861 } 8862 8863 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); 8864 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); 8865 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", 8866 speed_max(mddev), desc); 8867 8868 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 8869 8870 io_sectors = 0; 8871 for (m = 0; m < SYNC_MARKS; m++) { 8872 mark[m] = jiffies; 8873 mark_cnt[m] = io_sectors; 8874 } 8875 last_mark = 0; 8876 mddev->resync_mark = mark[last_mark]; 8877 mddev->resync_mark_cnt = mark_cnt[last_mark]; 8878 8879 /* 8880 * Tune reconstruction: 8881 */ 8882 window = 32 * (PAGE_SIZE / 512); 8883 pr_debug("md: using %dk window, over a total of %lluk.\n", 8884 window/2, (unsigned long long)max_sectors/2); 8885 8886 atomic_set(&mddev->recovery_active, 0); 8887 last_check = 0; 8888 8889 if (j>2) { 8890 pr_debug("md: resuming %s of %s from checkpoint.\n", 8891 desc, mdname(mddev)); 8892 mddev->curr_resync = j; 8893 } else 8894 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ 8895 mddev->curr_resync_completed = j; 8896 sysfs_notify_dirent_safe(mddev->sysfs_completed); 8897 md_new_event(); 8898 update_time = jiffies; 8899 8900 blk_start_plug(&plug); 8901 while (j < max_sectors) { 8902 sector_t sectors; 8903 8904 skipped = 0; 8905 8906 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8907 ((mddev->curr_resync > mddev->curr_resync_completed && 8908 (mddev->curr_resync - mddev->curr_resync_completed) 8909 > (max_sectors >> 4)) || 8910 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 8911 (j - mddev->curr_resync_completed)*2 8912 >= mddev->resync_max - mddev->curr_resync_completed || 8913 mddev->curr_resync_completed > mddev->resync_max 8914 )) { 8915 /* time to update curr_resync_completed */ 8916 wait_event(mddev->recovery_wait, 8917 atomic_read(&mddev->recovery_active) == 0); 8918 mddev->curr_resync_completed = j; 8919 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 8920 j > mddev->recovery_cp) 8921 mddev->recovery_cp = j; 8922 update_time = jiffies; 8923 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8924 sysfs_notify_dirent_safe(mddev->sysfs_completed); 8925 } 8926 8927 while (j >= mddev->resync_max && 8928 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8929 /* As this condition is controlled by user-space, 8930 * we can block indefinitely, so use '_interruptible' 8931 * to avoid triggering warnings. 8932 */ 8933 flush_signals(current); /* just in case */ 8934 wait_event_interruptible(mddev->recovery_wait, 8935 mddev->resync_max > j 8936 || test_bit(MD_RECOVERY_INTR, 8937 &mddev->recovery)); 8938 } 8939 8940 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8941 break; 8942 8943 sectors = mddev->pers->sync_request(mddev, j, &skipped); 8944 if (sectors == 0) { 8945 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8946 break; 8947 } 8948 8949 if (!skipped) { /* actual IO requested */ 8950 io_sectors += sectors; 8951 atomic_add(sectors, &mddev->recovery_active); 8952 } 8953 8954 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8955 break; 8956 8957 j += sectors; 8958 if (j > max_sectors) 8959 /* when skipping, extra large numbers can be returned. 
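 *
 * (A personality that finds the rest of the range already in sync may
 *  report a very large "skipped" chunk in one call, so j is clamped to
 *  keep curr_resync and the /proc/mdstat percentages within range.)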
*/
8960 j = max_sectors;
8961 if (j > 2)
8962 mddev->curr_resync = j;
8963 mddev->curr_mark_cnt = io_sectors;
8964 if (last_check == 0)
8965 /* this is the earliest that rebuild will be
8966 * visible in /proc/mdstat
8967 */
8968 md_new_event();
8969
8970 if (last_check + window > io_sectors || j == max_sectors)
8971 continue;
8972
8973 last_check = io_sectors;
8974 repeat:
8975 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8976 /* step marks */
8977 int next = (last_mark+1) % SYNC_MARKS;
8978
8979 mddev->resync_mark = mark[next];
8980 mddev->resync_mark_cnt = mark_cnt[next];
8981 mark[next] = jiffies;
8982 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8983 last_mark = next;
8984 }
8985
8986 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8987 break;
8988
8989 /*
8990 * this loop exits only when we are slower than
8991 * the 'hard' speed limit, or the system was IO-idle for
8992 * a jiffy.
8993 * the system might be non-idle CPU-wise, but we only care
8994 * about not overloading the IO subsystem. (things like an
8995 * e2fsck being done on the RAID array should execute fast)
8996 */
8997 cond_resched();
8998
8999 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
9000 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
9001 /((jiffies-mddev->resync_mark)/HZ +1) +1;
9002
9003 if (currspeed > speed_min(mddev)) {
9004 if (currspeed > speed_max(mddev)) {
9005 msleep(500);
9006 goto repeat;
9007 }
9008 if (!is_mddev_idle(mddev, 0)) {
9009 /*
9010 * Give other IO more of a chance.
9011 * The faster the devices, the less we wait.
9012 */
9013 wait_event(mddev->recovery_wait,
9014 !atomic_read(&mddev->recovery_active));
9015 }
9016 }
9017 }
9018 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
9019 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
9020 ?
"interrupted" : "done"); 9021 /* 9022 * this also signals 'finished resyncing' to md_stop 9023 */ 9024 blk_finish_plug(&plug); 9025 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 9026 9027 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9028 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9029 mddev->curr_resync >= MD_RESYNC_ACTIVE) { 9030 mddev->curr_resync_completed = mddev->curr_resync; 9031 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9032 } 9033 mddev->pers->sync_request(mddev, max_sectors, &skipped); 9034 9035 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 9036 mddev->curr_resync >= MD_RESYNC_ACTIVE) { 9037 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 9038 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9039 if (mddev->curr_resync >= mddev->recovery_cp) { 9040 pr_debug("md: checkpointing %s of %s.\n", 9041 desc, mdname(mddev)); 9042 if (test_bit(MD_RECOVERY_ERROR, 9043 &mddev->recovery)) 9044 mddev->recovery_cp = 9045 mddev->curr_resync_completed; 9046 else 9047 mddev->recovery_cp = 9048 mddev->curr_resync; 9049 } 9050 } else 9051 mddev->recovery_cp = MaxSector; 9052 } else { 9053 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9054 mddev->curr_resync = MaxSector; 9055 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9056 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 9057 rcu_read_lock(); 9058 rdev_for_each_rcu(rdev, mddev) 9059 if (rdev->raid_disk >= 0 && 9060 mddev->delta_disks >= 0 && 9061 !test_bit(Journal, &rdev->flags) && 9062 !test_bit(Faulty, &rdev->flags) && 9063 !test_bit(In_sync, &rdev->flags) && 9064 rdev->recovery_offset < mddev->curr_resync) 9065 rdev->recovery_offset = mddev->curr_resync; 9066 rcu_read_unlock(); 9067 } 9068 } 9069 } 9070 skip: 9071 /* set CHANGE_PENDING here since maybe another update is needed, 9072 * so other nodes are informed. It should be harmless for normal 9073 * raid */ 9074 set_mask_bits(&mddev->sb_flags, 0, 9075 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 9076 9077 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9078 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9079 mddev->delta_disks > 0 && 9080 mddev->pers->finish_reshape && 9081 mddev->pers->size && 9082 mddev->queue) { 9083 mddev_lock_nointr(mddev); 9084 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 9085 mddev_unlock(mddev); 9086 if (!mddev_is_clustered(mddev)) 9087 set_capacity_and_notify(mddev->gendisk, 9088 mddev->array_sectors); 9089 } 9090 9091 spin_lock(&mddev->lock); 9092 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9093 /* We completed so min/max setting can be forgotten if used. 
*/ 9094 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9095 mddev->resync_min = 0; 9096 mddev->resync_max = MaxSector; 9097 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9098 mddev->resync_min = mddev->curr_resync_completed; 9099 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 9100 mddev->curr_resync = MD_RESYNC_NONE; 9101 spin_unlock(&mddev->lock); 9102 9103 wake_up(&resync_wait); 9104 md_wakeup_thread(mddev->thread); 9105 return; 9106 } 9107 EXPORT_SYMBOL_GPL(md_do_sync); 9108 9109 static int remove_and_add_spares(struct mddev *mddev, 9110 struct md_rdev *this) 9111 { 9112 struct md_rdev *rdev; 9113 int spares = 0; 9114 int removed = 0; 9115 bool remove_some = false; 9116 9117 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 9118 /* Mustn't remove devices when resync thread is running */ 9119 return 0; 9120 9121 rdev_for_each(rdev, mddev) { 9122 if ((this == NULL || rdev == this) && 9123 rdev->raid_disk >= 0 && 9124 !test_bit(Blocked, &rdev->flags) && 9125 test_bit(Faulty, &rdev->flags) && 9126 atomic_read(&rdev->nr_pending)==0) { 9127 /* Faulty non-Blocked devices with nr_pending == 0 9128 * never get nr_pending incremented, 9129 * never get Faulty cleared, and never get Blocked set. 9130 * So we can synchronize_rcu now rather than once per device 9131 */ 9132 remove_some = true; 9133 set_bit(RemoveSynchronized, &rdev->flags); 9134 } 9135 } 9136 9137 if (remove_some) 9138 synchronize_rcu(); 9139 rdev_for_each(rdev, mddev) { 9140 if ((this == NULL || rdev == this) && 9141 rdev->raid_disk >= 0 && 9142 !test_bit(Blocked, &rdev->flags) && 9143 ((test_bit(RemoveSynchronized, &rdev->flags) || 9144 (!test_bit(In_sync, &rdev->flags) && 9145 !test_bit(Journal, &rdev->flags))) && 9146 atomic_read(&rdev->nr_pending)==0)) { 9147 if (mddev->pers->hot_remove_disk( 9148 mddev, rdev) == 0) { 9149 sysfs_unlink_rdev(mddev, rdev); 9150 rdev->saved_raid_disk = rdev->raid_disk; 9151 rdev->raid_disk = -1; 9152 removed++; 9153 } 9154 } 9155 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags)) 9156 clear_bit(RemoveSynchronized, &rdev->flags); 9157 } 9158 9159 if (removed && mddev->kobj.sd) 9160 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9161 9162 if (this && removed) 9163 goto no_add; 9164 9165 rdev_for_each(rdev, mddev) { 9166 if (this && this != rdev) 9167 continue; 9168 if (test_bit(Candidate, &rdev->flags)) 9169 continue; 9170 if (rdev->raid_disk >= 0 && 9171 !test_bit(In_sync, &rdev->flags) && 9172 !test_bit(Journal, &rdev->flags) && 9173 !test_bit(Faulty, &rdev->flags)) 9174 spares++; 9175 if (rdev->raid_disk >= 0) 9176 continue; 9177 if (test_bit(Faulty, &rdev->flags)) 9178 continue; 9179 if (!test_bit(Journal, &rdev->flags)) { 9180 if (!md_is_rdwr(mddev) && 9181 !(rdev->saved_raid_disk >= 0 && 9182 !test_bit(Bitmap_sync, &rdev->flags))) 9183 continue; 9184 9185 rdev->recovery_offset = 0; 9186 } 9187 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { 9188 /* failure here is OK */ 9189 sysfs_link_rdev(mddev, rdev); 9190 if (!test_bit(Journal, &rdev->flags)) 9191 spares++; 9192 md_new_event(); 9193 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9194 } 9195 } 9196 no_add: 9197 if (removed) 9198 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9199 return spares; 9200 } 9201 9202 static void md_start_sync(struct work_struct *ws) 9203 { 9204 struct mddev *mddev = container_of(ws, struct mddev, del_work); 9205 9206 mddev->sync_thread = md_register_thread(md_do_sync, 9207 mddev, 9208 "resync"); 9209 if (!mddev->sync_thread) { 9210 pr_warn("%s: could not start resync 
thread...\n",
9211 mdname(mddev));
9212 /* leave the spares where they are, it shouldn't hurt */
9213 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9214 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9215 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9216 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9217 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9218 wake_up(&resync_wait);
9219 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9220 &mddev->recovery))
9221 if (mddev->sysfs_action)
9222 sysfs_notify_dirent_safe(mddev->sysfs_action);
9223 } else
9224 md_wakeup_thread(mddev->sync_thread);
9225 sysfs_notify_dirent_safe(mddev->sysfs_action);
9226 md_new_event();
9227 }
9228
9229 /*
9230 * This routine is regularly called by all per-raid-array threads to
9231 * deal with generic issues like resync and super-block update.
9232 * Raid personalities that don't have a thread (linear/raid0) do not
9233 * need this as they never do any recovery or update the superblock.
9234 *
9235 * It does not do any resync itself, but rather "forks" off other threads
9236 * to do that as needed.
9237 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9238 * "->recovery" and create a thread at ->sync_thread.
9239 * When the thread finishes it sets MD_RECOVERY_DONE
9240 * and wakes up this thread which will reap the thread and finish up.
9241 * This thread also removes any faulty devices (with nr_pending == 0).
9242 *
9243 * The overall approach is:
9244 * 1/ if the superblock needs updating, update it.
9245 * 2/ If a recovery thread is running, don't do anything else.
9246 * 3/ If recovery has finished, clean up, possibly marking spares active.
9247 * 4/ If there are any faulty devices, remove them.
9248 * 5/ If array is degraded, try to add spare devices
9249 * 6/ If array has spares or is not in-sync, start a resync thread.
9250 */
9251 void md_check_recovery(struct mddev *mddev)
9252 {
9253 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9254 /* Write superblock - thread that called mddev_suspend()
9255 * holds reconfig_mutex for us.
9256 */
9257 set_bit(MD_UPDATING_SB, &mddev->flags);
9258 smp_mb__after_atomic();
9259 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9260 md_update_sb(mddev, 0);
9261 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9262 wake_up(&mddev->sb_wait);
9263 }
9264
9265 if (mddev->suspended)
9266 return;
9267
9268 if (mddev->bitmap)
9269 md_bitmap_daemon_work(mddev);
9270
9271 if (signal_pending(current)) {
9272 if (mddev->pers->sync_request && !mddev->external) {
9273 pr_debug("md: %s in immediate safe mode\n",
9274 mdname(mddev));
9275 mddev->safemode = 2;
9276 }
9277 flush_signals(current);
9278 }
9279
9280 if (!md_is_rdwr(mddev) &&
9281 !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9282 return;
9283 if ( !
( 9284 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || 9285 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9286 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 9287 (mddev->external == 0 && mddev->safemode == 1) || 9288 (mddev->safemode == 2 9289 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 9290 )) 9291 return; 9292 9293 if (mddev_trylock(mddev)) { 9294 int spares = 0; 9295 bool try_set_sync = mddev->safemode != 0; 9296 9297 if (!mddev->external && mddev->safemode == 1) 9298 mddev->safemode = 0; 9299 9300 if (!md_is_rdwr(mddev)) { 9301 struct md_rdev *rdev; 9302 if (!mddev->external && mddev->in_sync) 9303 /* 'Blocked' flag not needed as failed devices 9304 * will be recorded if array switched to read/write. 9305 * Leaving it set will prevent the device 9306 * from being removed. 9307 */ 9308 rdev_for_each(rdev, mddev) 9309 clear_bit(Blocked, &rdev->flags); 9310 /* On a read-only array we can: 9311 * - remove failed devices 9312 * - add already-in_sync devices if the array itself 9313 * is in-sync. 9314 * As we only add devices that are already in-sync, 9315 * we can activate the spares immediately. 9316 */ 9317 remove_and_add_spares(mddev, NULL); 9318 /* There is no thread, but we need to call 9319 * ->spare_active and clear saved_raid_disk 9320 */ 9321 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 9322 md_unregister_thread(&mddev->sync_thread); 9323 md_reap_sync_thread(mddev); 9324 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9325 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9326 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 9327 goto unlock; 9328 } 9329 9330 if (mddev_is_clustered(mddev)) { 9331 struct md_rdev *rdev, *tmp; 9332 /* kick the device if another node issued a 9333 * remove disk. 9334 */ 9335 rdev_for_each_safe(rdev, tmp, mddev) { 9336 if (test_and_clear_bit(ClusterRemove, &rdev->flags) && 9337 rdev->raid_disk < 0) 9338 md_kick_rdev_from_array(rdev); 9339 } 9340 } 9341 9342 if (try_set_sync && !mddev->external && !mddev->in_sync) { 9343 spin_lock(&mddev->lock); 9344 set_in_sync(mddev); 9345 spin_unlock(&mddev->lock); 9346 } 9347 9348 if (mddev->sb_flags) 9349 md_update_sb(mddev, 0); 9350 9351 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 9352 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 9353 /* resync/recovery still happening */ 9354 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9355 goto unlock; 9356 } 9357 if (mddev->sync_thread) { 9358 md_unregister_thread(&mddev->sync_thread); 9359 md_reap_sync_thread(mddev); 9360 goto unlock; 9361 } 9362 /* Set RUNNING before clearing NEEDED to avoid 9363 * any transients in the value of "sync_action". 9364 */ 9365 mddev->curr_resync_completed = 0; 9366 spin_lock(&mddev->lock); 9367 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9368 spin_unlock(&mddev->lock); 9369 /* Clear some bits that don't mean anything, but 9370 * might be left set 9371 */ 9372 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 9373 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9374 9375 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9376 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 9377 goto not_running; 9378 /* no recovery is running. 9379 * remove any failed drives, then 9380 * add spares if possible. 9381 * Spares are also removed and re-added, to allow 9382 * the personality to fail the re-add. 
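 *
 * The ladder below then picks (at most) one action for md_start_sync():
 *
 *	reshape_position set		-> MD_RECOVERY_RESHAPE
 *	spare devices to rebuild	-> MD_RECOVERY_RECOVER
 *	recovery_cp < MaxSector		-> MD_RECOVERY_SYNC (resync)
 *	otherwise			-> nothing to start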
9383 */ 9384 9385 if (mddev->reshape_position != MaxSector) { 9386 if (mddev->pers->check_reshape == NULL || 9387 mddev->pers->check_reshape(mddev) != 0) 9388 /* Cannot proceed */ 9389 goto not_running; 9390 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9391 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9392 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 9393 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9394 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9395 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9396 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9397 } else if (mddev->recovery_cp < MaxSector) { 9398 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9399 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9400 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 9401 /* nothing to be done ... */ 9402 goto not_running; 9403 9404 if (mddev->pers->sync_request) { 9405 if (spares) { 9406 /* We are adding a device or devices to an array 9407 * which has the bitmap stored on all devices. 9408 * So make sure all bitmap pages get written 9409 */ 9410 md_bitmap_write_all(mddev->bitmap); 9411 } 9412 INIT_WORK(&mddev->del_work, md_start_sync); 9413 queue_work(md_misc_wq, &mddev->del_work); 9414 goto unlock; 9415 } 9416 not_running: 9417 if (!mddev->sync_thread) { 9418 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9419 wake_up(&resync_wait); 9420 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 9421 &mddev->recovery)) 9422 if (mddev->sysfs_action) 9423 sysfs_notify_dirent_safe(mddev->sysfs_action); 9424 } 9425 unlock: 9426 wake_up(&mddev->sb_wait); 9427 mddev_unlock(mddev); 9428 } 9429 } 9430 EXPORT_SYMBOL(md_check_recovery); 9431 9432 void md_reap_sync_thread(struct mddev *mddev) 9433 { 9434 struct md_rdev *rdev; 9435 sector_t old_dev_sectors = mddev->dev_sectors; 9436 bool is_reshaped = false; 9437 9438 /* sync_thread should be unregistered, collect result */ 9439 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9440 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 9441 mddev->degraded != mddev->raid_disks) { 9442 /* success...*/ 9443 /* activate any spares */ 9444 if (mddev->pers->spare_active(mddev)) { 9445 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9446 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9447 } 9448 } 9449 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9450 mddev->pers->finish_reshape) { 9451 mddev->pers->finish_reshape(mddev); 9452 if (mddev_is_clustered(mddev)) 9453 is_reshaped = true; 9454 } 9455 9456 /* If array is no-longer degraded, then any saved_raid_disk 9457 * information must be scrapped. 9458 */ 9459 if (!mddev->degraded) 9460 rdev_for_each(rdev, mddev) 9461 rdev->saved_raid_disk = -1; 9462 9463 md_update_sb(mddev, 1); 9464 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can 9465 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by 9466 * clustered raid */ 9467 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) 9468 md_cluster_ops->resync_finish(mddev); 9469 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9470 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9471 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9472 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9473 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9474 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9475 /* 9476 * We call md_cluster_ops->update_size here because sync_size could 9477 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared, 9478 * so it is time to update size across cluster. 
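 *
 * (old_dev_sectors was sampled at the top of this function, before
 *  ->finish_reshape() ran, so update_size() sees the device size as it
 *  was before the reshape completed.)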
9479 */ 9480 if (mddev_is_clustered(mddev) && is_reshaped 9481 && !test_bit(MD_CLOSING, &mddev->flags)) 9482 md_cluster_ops->update_size(mddev, old_dev_sectors); 9483 wake_up(&resync_wait); 9484 /* flag recovery needed just to double check */ 9485 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9486 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9487 sysfs_notify_dirent_safe(mddev->sysfs_action); 9488 md_new_event(); 9489 if (mddev->event_work.func) 9490 queue_work(md_misc_wq, &mddev->event_work); 9491 } 9492 EXPORT_SYMBOL(md_reap_sync_thread); 9493 9494 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 9495 { 9496 sysfs_notify_dirent_safe(rdev->sysfs_state); 9497 wait_event_timeout(rdev->blocked_wait, 9498 !test_bit(Blocked, &rdev->flags) && 9499 !test_bit(BlockedBadBlocks, &rdev->flags), 9500 msecs_to_jiffies(5000)); 9501 rdev_dec_pending(rdev, mddev); 9502 } 9503 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 9504 9505 void md_finish_reshape(struct mddev *mddev) 9506 { 9507 /* called be personality module when reshape completes. */ 9508 struct md_rdev *rdev; 9509 9510 rdev_for_each(rdev, mddev) { 9511 if (rdev->data_offset > rdev->new_data_offset) 9512 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 9513 else 9514 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 9515 rdev->data_offset = rdev->new_data_offset; 9516 } 9517 } 9518 EXPORT_SYMBOL(md_finish_reshape); 9519 9520 /* Bad block management */ 9521 9522 /* Returns 1 on success, 0 on failure */ 9523 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9524 int is_new) 9525 { 9526 struct mddev *mddev = rdev->mddev; 9527 int rv; 9528 if (is_new) 9529 s += rdev->new_data_offset; 9530 else 9531 s += rdev->data_offset; 9532 rv = badblocks_set(&rdev->badblocks, s, sectors, 0); 9533 if (rv == 0) { 9534 /* Make sure they get written out promptly */ 9535 if (test_bit(ExternalBbl, &rdev->flags)) 9536 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks); 9537 sysfs_notify_dirent_safe(rdev->sysfs_state); 9538 set_mask_bits(&mddev->sb_flags, 0, 9539 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); 9540 md_wakeup_thread(rdev->mddev->thread); 9541 return 1; 9542 } else 9543 return 0; 9544 } 9545 EXPORT_SYMBOL_GPL(rdev_set_badblocks); 9546 9547 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9548 int is_new) 9549 { 9550 int rv; 9551 if (is_new) 9552 s += rdev->new_data_offset; 9553 else 9554 s += rdev->data_offset; 9555 rv = badblocks_clear(&rdev->badblocks, s, sectors); 9556 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) 9557 sysfs_notify_dirent_safe(rdev->sysfs_badblocks); 9558 return rv; 9559 } 9560 EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 9561 9562 static int md_notify_reboot(struct notifier_block *this, 9563 unsigned long code, void *x) 9564 { 9565 struct mddev *mddev, *n; 9566 int need_delay = 0; 9567 9568 spin_lock(&all_mddevs_lock); 9569 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { 9570 if (!mddev_get(mddev)) 9571 continue; 9572 spin_unlock(&all_mddevs_lock); 9573 if (mddev_trylock(mddev)) { 9574 if (mddev->pers) 9575 __md_stop_writes(mddev); 9576 if (mddev->persistent) 9577 mddev->safemode = 2; 9578 mddev_unlock(mddev); 9579 } 9580 need_delay = 1; 9581 mddev_put(mddev); 9582 spin_lock(&all_mddevs_lock); 9583 } 9584 spin_unlock(&all_mddevs_lock); 9585 9586 /* 9587 * certain more exotic SCSI devices are known to be 9588 * volatile wrt too early system reboots. 
While the 9589 * right place to handle this issue is the given 9590 * driver, we do want to have a safe RAID driver ... 9591 */ 9592 if (need_delay) 9593 msleep(1000); 9594 9595 return NOTIFY_DONE; 9596 } 9597 9598 static struct notifier_block md_notifier = { 9599 .notifier_call = md_notify_reboot, 9600 .next = NULL, 9601 .priority = INT_MAX, /* before any real devices */ 9602 }; 9603 9604 static void md_geninit(void) 9605 { 9606 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 9607 9608 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops); 9609 } 9610 9611 static int __init md_init(void) 9612 { 9613 int ret = -ENOMEM; 9614 9615 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); 9616 if (!md_wq) 9617 goto err_wq; 9618 9619 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 9620 if (!md_misc_wq) 9621 goto err_misc_wq; 9622 9623 md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0); 9624 if (!md_rdev_misc_wq) 9625 goto err_rdev_misc_wq; 9626 9627 ret = __register_blkdev(MD_MAJOR, "md", md_probe); 9628 if (ret < 0) 9629 goto err_md; 9630 9631 ret = __register_blkdev(0, "mdp", md_probe); 9632 if (ret < 0) 9633 goto err_mdp; 9634 mdp_major = ret; 9635 9636 register_reboot_notifier(&md_notifier); 9637 raid_table_header = register_sysctl_table(raid_root_table); 9638 9639 md_geninit(); 9640 return 0; 9641 9642 err_mdp: 9643 unregister_blkdev(MD_MAJOR, "md"); 9644 err_md: 9645 destroy_workqueue(md_rdev_misc_wq); 9646 err_rdev_misc_wq: 9647 destroy_workqueue(md_misc_wq); 9648 err_misc_wq: 9649 destroy_workqueue(md_wq); 9650 err_wq: 9651 return ret; 9652 } 9653 9654 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) 9655 { 9656 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 9657 struct md_rdev *rdev2, *tmp; 9658 int role, ret; 9659 9660 /* 9661 * If size is changed in another node then we need to 9662 * do resize as well. 9663 */ 9664 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { 9665 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); 9666 if (ret) 9667 pr_info("md-cluster: resize failed\n"); 9668 else 9669 md_bitmap_update_sb(mddev->bitmap); 9670 } 9671 9672 /* Check for change of roles in the active devices */ 9673 rdev_for_each_safe(rdev2, tmp, mddev) { 9674 if (test_bit(Faulty, &rdev2->flags)) 9675 continue; 9676 9677 /* Check if the roles changed */ 9678 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); 9679 9680 if (test_bit(Candidate, &rdev2->flags)) { 9681 if (role == MD_DISK_ROLE_FAULTY) { 9682 pr_info("md: Removing Candidate device %pg because add failed\n", 9683 rdev2->bdev); 9684 md_kick_rdev_from_array(rdev2); 9685 continue; 9686 } 9687 else 9688 clear_bit(Candidate, &rdev2->flags); 9689 } 9690 9691 if (role != rdev2->raid_disk) { 9692 /* 9693 * got activated except reshape is happening. 9694 */ 9695 if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE && 9696 !(le32_to_cpu(sb->feature_map) & 9697 MD_FEATURE_RESHAPE_ACTIVE)) { 9698 rdev2->saved_raid_disk = role; 9699 ret = remove_and_add_spares(mddev, rdev2); 9700 pr_info("Activated spare: %pg\n", 9701 rdev2->bdev); 9702 /* wakeup mddev->thread here, so array could 9703 * perform resync with the new activated disk */ 9704 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9705 md_wakeup_thread(mddev->thread); 9706 } 9707 /* device faulty 9708 * We just want to do the minimum to mark the disk 9709 * as faulty. The recovery is performed by the 9710 * one who initiated the error. 
static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	struct md_rdev *rdev2, *tmp;
	int role, ret;

	/*
	 * If size is changed in another node then we need to
	 * do resize as well.
	 */
	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
		if (ret)
			pr_info("md-cluster: resize failed\n");
		else
			md_bitmap_update_sb(mddev->bitmap);
	}

	/* Check for change of roles in the active devices */
	rdev_for_each_safe(rdev2, tmp, mddev) {
		if (test_bit(Faulty, &rdev2->flags))
			continue;

		/* Check if the roles changed */
		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);

		if (test_bit(Candidate, &rdev2->flags)) {
			if (role == MD_DISK_ROLE_FAULTY) {
				pr_info("md: Removing Candidate device %pg because add failed\n",
					rdev2->bdev);
				md_kick_rdev_from_array(rdev2);
				continue;
			} else
				clear_bit(Candidate, &rdev2->flags);
		}

		if (role != rdev2->raid_disk) {
			/*
			 * The device just got activated, unless a reshape is
			 * in progress.
			 */
			if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
			    !(le32_to_cpu(sb->feature_map) &
			      MD_FEATURE_RESHAPE_ACTIVE)) {
				rdev2->saved_raid_disk = role;
				ret = remove_and_add_spares(mddev, rdev2);
				pr_info("Activated spare: %pg\n",
					rdev2->bdev);
				/* wake up mddev->thread here, so the array can
				 * perform resync with the newly activated disk */
				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
				md_wakeup_thread(mddev->thread);
			}
			/* device faulty
			 * We just want to do the minimum to mark the disk
			 * as faulty. The recovery is performed by the
			 * one who initiated the error.
			 */
			if (role == MD_DISK_ROLE_FAULTY ||
			    role == MD_DISK_ROLE_JOURNAL) {
				md_error(mddev, rdev2);
				clear_bit(Blocked, &rdev2->flags);
			}
		}
	}

	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
		if (ret)
			pr_warn("md: updating array disks failed. %d\n", ret);
	}

	/*
	 * Since mddev->delta_disks has already been updated in
	 * update_raid_disks, it is time to check reshape.
	 */
	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/*
		 * reshape is happening in the remote node, we need to
		 * update reshape_position and call start_reshape.
		 */
		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
		if (mddev->pers->start_reshape)
			mddev->pers->start_reshape(mddev);
	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
		   mddev->reshape_position != MaxSector &&
		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/* reshape has just finished on another node. */
		mddev->reshape_position = MaxSector;
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
	}

	/* Finally set the event to be up to date */
	mddev->events = le64_to_cpu(sb->events);
}

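/*
 * Re-read the superblock of a single member device from disk, used when
 * another cluster node has modified it.  On failure the previous sb_page
 * is restored so the in-core state is left untouched.
 */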
static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	int err;
	struct page *swapout = rdev->sb_page;
	struct mdp_superblock_1 *sb;

	/* Store the sb page of the rdev in the swapout temporary
	 * variable in case we err in the future
	 */
	rdev->sb_page = NULL;
	err = alloc_disk_sb(rdev);
	if (err == 0) {
		ClearPageUptodate(rdev->sb_page);
		rdev->sb_loaded = 0;
		err = super_types[mddev->major_version].
			load_super(rdev, NULL, mddev->minor_version);
	}
	if (err < 0) {
		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
			__func__, __LINE__, rdev->desc_nr, err);
		if (rdev->sb_page)
			put_page(rdev->sb_page);
		rdev->sb_page = swapout;
		rdev->sb_loaded = 1;
		return err;
	}

	sb = page_address(rdev->sb_page);
	/* Only pick up the recovery offset if MD_FEATURE_RECOVERY_OFFSET
	 * is set in the re-read superblock
	 */
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

	/* The other node finished recovery, call spare_active to set
	 * device In_sync and mddev->degraded
	 */
	if (rdev->recovery_offset == MaxSector &&
	    !test_bit(In_sync, &rdev->flags) &&
	    mddev->pers->spare_active(mddev))
		sysfs_notify_dirent_safe(mddev->sysfs_degraded);

	put_page(swapout);
	return 0;
}

void md_reload_sb(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev = NULL, *iter;
	int err;

	/* Find the rdev */
	rdev_for_each_rcu(iter, mddev) {
		if (iter->desc_nr == nr) {
			rdev = iter;
			break;
		}
	}

	if (!rdev) {
		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
		return;
	}

	err = read_rdev(mddev, rdev);
	if (err < 0)
		return;

	check_sb_changes(mddev, rdev);

	/* Read all rdevs to update recovery_offset */
	rdev_for_each_rcu(rdev, mddev) {
		if (!test_bit(Faulty, &rdev->flags))
			read_rdev(mddev, rdev);
	}
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		mutex_lock(&detected_devices_mutex);
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
		mutex_unlock(&detected_devices_mutex);
	}
}

void md_autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	pr_info("md: Autodetecting RAID arrays.\n");

	mutex_lock(&detected_devices_mutex);
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		mutex_unlock(&detected_devices_mutex);
		rdev = md_import_device(dev, 0, 90);
		mutex_lock(&detected_devices_mutex);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}
	mutex_unlock(&detected_devices_mutex);

	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

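/*
 * Module unload: wake anyone still sleeping on md_event_waiters until no
 * waiters remain, remove /proc/mdstat, release every remaining mddev and
 * finally destroy the workqueues their destruction was queued on.
 */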
static __exit void md_exit(void)
{
	struct mddev *mddev, *n;
	int delay = 1;

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the modules while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
		delay += delay;
	}
	remove_proc_entry("mdstat", NULL);

	spin_lock(&all_mddevs_lock);
	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
		if (!mddev_get(mddev))
			continue;
		spin_unlock(&all_mddevs_lock);
		export_array(mddev);
		mddev->ctime = 0;
		mddev->hold_active = 0;
		/*
		 * As the mddev is now fully clear, mddev_put will schedule
		 * the mddev for destruction by a workqueue, and the
		 * destroy_workqueue() below will wait for that to complete.
		 */
		mddev_put(mddev);
		spin_lock(&all_mddevs_lock);
	}
	spin_unlock(&all_mddevs_lock);

	destroy_workqueue(md_rdev_misc_wq);
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%d\n", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);