// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
        or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
        adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
        or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
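 * (For example, a running count of 16 decays to 4 if two error-free
 * hours pass before the next read error: 16 -> 8 -> 4.)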
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * rdev needs serialization enabled if it meets both conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resources for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device for which rdev_need_serial() returns true.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
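 * (The serialization exists to keep overlapping write-behind writes to a
 * write-mostly member from completing out of order on a multi-queue
 * device.)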
210 */ 211 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 212 bool is_suspend) 213 { 214 int ret = 0; 215 216 if (rdev && !rdev_need_serial(rdev) && 217 !test_bit(CollisionCheck, &rdev->flags)) 218 return; 219 220 if (!is_suspend) 221 mddev_suspend(mddev); 222 223 if (!rdev) 224 ret = rdevs_init_serial(mddev); 225 else 226 ret = rdev_init_serial(rdev); 227 if (ret) 228 goto abort; 229 230 if (mddev->serial_info_pool == NULL) { 231 /* 232 * already in memalloc noio context by 233 * mddev_suspend() 234 */ 235 mddev->serial_info_pool = 236 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 237 sizeof(struct serial_info)); 238 if (!mddev->serial_info_pool) { 239 rdevs_uninit_serial(mddev); 240 pr_err("can't alloc memory pool for serialization\n"); 241 } 242 } 243 244 abort: 245 if (!is_suspend) 246 mddev_resume(mddev); 247 } 248 249 /* 250 * Free resource from rdev(s), and destroy serial_info_pool under conditions: 251 * 1. rdev is the last device flaged with CollisionCheck. 252 * 2. when bitmap is destroyed while policy is not enabled. 253 * 3. for disable policy, the pool is destroyed only when no rdev needs it. 254 */ 255 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 256 bool is_suspend) 257 { 258 if (rdev && !test_bit(CollisionCheck, &rdev->flags)) 259 return; 260 261 if (mddev->serial_info_pool) { 262 struct md_rdev *temp; 263 int num = 0; /* used to track if other rdevs need the pool */ 264 265 if (!is_suspend) 266 mddev_suspend(mddev); 267 rdev_for_each(temp, mddev) { 268 if (!rdev) { 269 if (!mddev->serialize_policy || 270 !rdev_need_serial(temp)) 271 rdev_uninit_serial(temp); 272 else 273 num++; 274 } else if (temp != rdev && 275 test_bit(CollisionCheck, &temp->flags)) 276 num++; 277 } 278 279 if (rdev) 280 rdev_uninit_serial(rdev); 281 282 if (num) 283 pr_info("The mempool could be used by other devices\n"); 284 else { 285 mempool_destroy(mddev->serial_info_pool); 286 mddev->serial_info_pool = NULL; 287 } 288 if (!is_suspend) 289 mddev_resume(mddev); 290 } 291 } 292 293 static struct ctl_table_header *raid_table_header; 294 295 static struct ctl_table raid_table[] = { 296 { 297 .procname = "speed_limit_min", 298 .data = &sysctl_speed_limit_min, 299 .maxlen = sizeof(int), 300 .mode = S_IRUGO|S_IWUSR, 301 .proc_handler = proc_dointvec, 302 }, 303 { 304 .procname = "speed_limit_max", 305 .data = &sysctl_speed_limit_max, 306 .maxlen = sizeof(int), 307 .mode = S_IRUGO|S_IWUSR, 308 .proc_handler = proc_dointvec, 309 }, 310 { } 311 }; 312 313 static struct ctl_table raid_dir_table[] = { 314 { 315 .procname = "raid", 316 .maxlen = 0, 317 .mode = S_IRUGO|S_IXUGO, 318 .child = raid_table, 319 }, 320 { } 321 }; 322 323 static struct ctl_table raid_root_table[] = { 324 { 325 .procname = "dev", 326 .maxlen = 0, 327 .mode = 0555, 328 .child = raid_dir_table, 329 }, 330 { } 331 }; 332 333 static int start_readonly; 334 335 /* 336 * The original mechanism for creating an md device is to create 337 * a device node in /dev and to open it. This causes races with device-close. 338 * The preferred method is to write to the "new_array" module parameter. 339 * This can avoid races. 340 * Setting create_on_open to false disables the original mechanism 341 * so all the races disappear. 342 */ 343 static bool create_on_open = true; 344 345 /* 346 * We have a system wide 'event count' that is incremented 347 * on any 'interesting' event, and readers of /proc/mdstat 348 * can use 'poll' or 'select' to find out when the event 349 * count increases. 
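 * (Userspace, e.g. "mdadm --monitor", typically keeps /proc/mdstat open,
 * sleeps in poll(2)/select(2), and re-reads the file when it is woken.)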
350 * 351 * Events are: 352 * start array, stop array, error, add device, remove device, 353 * start build, activate spare 354 */ 355 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 356 static atomic_t md_event_count; 357 void md_new_event(void) 358 { 359 atomic_inc(&md_event_count); 360 wake_up(&md_event_waiters); 361 } 362 EXPORT_SYMBOL_GPL(md_new_event); 363 364 /* 365 * Enables to iterate over all existing md arrays 366 * all_mddevs_lock protects this list. 367 */ 368 static LIST_HEAD(all_mddevs); 369 static DEFINE_SPINLOCK(all_mddevs_lock); 370 371 /* Rather than calling directly into the personality make_request function, 372 * IO requests come here first so that we can check if the device is 373 * being suspended pending a reconfiguration. 374 * We hold a refcount over the call to ->make_request. By the time that 375 * call has finished, the bio has been linked into some internal structure 376 * and so is visible to ->quiesce(), so we don't need the refcount any more. 377 */ 378 static bool is_suspended(struct mddev *mddev, struct bio *bio) 379 { 380 if (mddev->suspended) 381 return true; 382 if (bio_data_dir(bio) != WRITE) 383 return false; 384 if (mddev->suspend_lo >= mddev->suspend_hi) 385 return false; 386 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) 387 return false; 388 if (bio_end_sector(bio) < mddev->suspend_lo) 389 return false; 390 return true; 391 } 392 393 void md_handle_request(struct mddev *mddev, struct bio *bio) 394 { 395 check_suspended: 396 rcu_read_lock(); 397 if (is_suspended(mddev, bio)) { 398 DEFINE_WAIT(__wait); 399 /* Bail out if REQ_NOWAIT is set for the bio */ 400 if (bio->bi_opf & REQ_NOWAIT) { 401 rcu_read_unlock(); 402 bio_wouldblock_error(bio); 403 return; 404 } 405 for (;;) { 406 prepare_to_wait(&mddev->sb_wait, &__wait, 407 TASK_UNINTERRUPTIBLE); 408 if (!is_suspended(mddev, bio)) 409 break; 410 rcu_read_unlock(); 411 schedule(); 412 rcu_read_lock(); 413 } 414 finish_wait(&mddev->sb_wait, &__wait); 415 } 416 atomic_inc(&mddev->active_io); 417 rcu_read_unlock(); 418 419 if (!mddev->pers->make_request(mddev, bio)) { 420 atomic_dec(&mddev->active_io); 421 wake_up(&mddev->sb_wait); 422 goto check_suspended; 423 } 424 425 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 426 wake_up(&mddev->sb_wait); 427 } 428 EXPORT_SYMBOL(md_handle_request); 429 430 static void md_submit_bio(struct bio *bio) 431 { 432 const int rw = bio_data_dir(bio); 433 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; 434 435 if (mddev == NULL || mddev->pers == NULL) { 436 bio_io_error(bio); 437 return; 438 } 439 440 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { 441 bio_io_error(bio); 442 return; 443 } 444 445 blk_queue_split(&bio); 446 447 if (mddev->ro == 1 && unlikely(rw == WRITE)) { 448 if (bio_sectors(bio) != 0) 449 bio->bi_status = BLK_STS_IOERR; 450 bio_endio(bio); 451 return; 452 } 453 454 /* bio could be mergeable after passing to underlayer */ 455 bio->bi_opf &= ~REQ_NOMERGE; 456 457 md_handle_request(mddev, bio); 458 } 459 460 /* mddev_suspend makes sure no new requests are submitted 461 * to the device, and that any requests that have been submitted 462 * are completely handled. 463 * Once mddev_detach() is called and completes, the module will be 464 * completely unused. 
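 * Calls nest: ->suspended is a depth count, so only the outermost
 * mddev_suspend() actually quiesces the array, and only the matching
 * final mddev_resume() lets I/O flow again.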
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	/* entered the memalloc scope from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_bioset(rdev->bdev, 0,
					      REQ_OP_WRITE | REQ_PREFLUSH,
					      GFP_NOIO, &mddev->bio_set);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * flush_bio must be reset before calling into md_handle_request to
	 * avoid a deadlock: other bios that have already passed the
	 * md_handle_request() suspend check could be waiting for this flush,
	 * while the md_handle_request() call below could in turn be waiting
	 * for those bios because of the same suspend check.
	 */
	spin_lock_irq(&mddev->lock);
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	spin_unlock_irq(&mddev->lock);
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty
barrier - all done */ 577 bio_endio(bio); 578 } else { 579 bio->bi_opf &= ~REQ_PREFLUSH; 580 md_handle_request(mddev, bio); 581 } 582 } 583 584 /* 585 * Manages consolidation of flushes and submitting any flushes needed for 586 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is 587 * being finished in another context. Returns false if the flushing is 588 * complete but still needs the I/O portion of the bio to be processed. 589 */ 590 bool md_flush_request(struct mddev *mddev, struct bio *bio) 591 { 592 ktime_t req_start = ktime_get_boottime(); 593 spin_lock_irq(&mddev->lock); 594 /* flush requests wait until ongoing flush completes, 595 * hence coalescing all the pending requests. 596 */ 597 wait_event_lock_irq(mddev->sb_wait, 598 !mddev->flush_bio || 599 ktime_before(req_start, mddev->prev_flush_start), 600 mddev->lock); 601 /* new request after previous flush is completed */ 602 if (ktime_after(req_start, mddev->prev_flush_start)) { 603 WARN_ON(mddev->flush_bio); 604 mddev->flush_bio = bio; 605 bio = NULL; 606 } 607 spin_unlock_irq(&mddev->lock); 608 609 if (!bio) { 610 INIT_WORK(&mddev->flush_work, submit_flushes); 611 queue_work(md_wq, &mddev->flush_work); 612 } else { 613 /* flush was performed for some other bio while we waited. */ 614 if (bio->bi_iter.bi_size == 0) 615 /* an empty barrier - all done */ 616 bio_endio(bio); 617 else { 618 bio->bi_opf &= ~REQ_PREFLUSH; 619 return false; 620 } 621 } 622 return true; 623 } 624 EXPORT_SYMBOL(md_flush_request); 625 626 static inline struct mddev *mddev_get(struct mddev *mddev) 627 { 628 lockdep_assert_held(&all_mddevs_lock); 629 630 if (test_bit(MD_DELETED, &mddev->flags)) 631 return NULL; 632 atomic_inc(&mddev->active); 633 return mddev; 634 } 635 636 static void mddev_delayed_delete(struct work_struct *ws); 637 638 static void mddev_put(struct mddev *mddev) 639 { 640 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 641 return; 642 if (!mddev->raid_disks && list_empty(&mddev->disks) && 643 mddev->ctime == 0 && !mddev->hold_active) { 644 /* Array is not configured at all, and not held active, 645 * so destroy it */ 646 set_bit(MD_DELETED, &mddev->flags); 647 648 /* 649 * Call queue_work inside the spinlock so that 650 * flush_workqueue() after mddev_find will succeed in waiting 651 * for the work to be done. 
652 */ 653 INIT_WORK(&mddev->del_work, mddev_delayed_delete); 654 queue_work(md_misc_wq, &mddev->del_work); 655 } 656 spin_unlock(&all_mddevs_lock); 657 } 658 659 static void md_safemode_timeout(struct timer_list *t); 660 661 void mddev_init(struct mddev *mddev) 662 { 663 mutex_init(&mddev->open_mutex); 664 mutex_init(&mddev->reconfig_mutex); 665 mutex_init(&mddev->bitmap_info.mutex); 666 INIT_LIST_HEAD(&mddev->disks); 667 INIT_LIST_HEAD(&mddev->all_mddevs); 668 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); 669 atomic_set(&mddev->active, 1); 670 atomic_set(&mddev->openers, 0); 671 atomic_set(&mddev->active_io, 0); 672 spin_lock_init(&mddev->lock); 673 atomic_set(&mddev->flush_pending, 0); 674 init_waitqueue_head(&mddev->sb_wait); 675 init_waitqueue_head(&mddev->recovery_wait); 676 mddev->reshape_position = MaxSector; 677 mddev->reshape_backwards = 0; 678 mddev->last_sync_action = "none"; 679 mddev->resync_min = 0; 680 mddev->resync_max = MaxSector; 681 mddev->level = LEVEL_NONE; 682 } 683 EXPORT_SYMBOL_GPL(mddev_init); 684 685 static struct mddev *mddev_find_locked(dev_t unit) 686 { 687 struct mddev *mddev; 688 689 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 690 if (mddev->unit == unit) 691 return mddev; 692 693 return NULL; 694 } 695 696 /* find an unused unit number */ 697 static dev_t mddev_alloc_unit(void) 698 { 699 static int next_minor = 512; 700 int start = next_minor; 701 bool is_free = 0; 702 dev_t dev = 0; 703 704 while (!is_free) { 705 dev = MKDEV(MD_MAJOR, next_minor); 706 next_minor++; 707 if (next_minor > MINORMASK) 708 next_minor = 0; 709 if (next_minor == start) 710 return 0; /* Oh dear, all in use. */ 711 is_free = !mddev_find_locked(dev); 712 } 713 714 return dev; 715 } 716 717 #ifndef MODULE 718 static struct mddev *mddev_find(dev_t unit) 719 { 720 struct mddev *mddev; 721 722 if (MAJOR(unit) != MD_MAJOR) 723 unit &= ~((1 << MdpMinorShift) - 1); 724 725 spin_lock(&all_mddevs_lock); 726 mddev = mddev_find_locked(unit); 727 if (mddev && !mddev_get(mddev)) 728 mddev = NULL; 729 spin_unlock(&all_mddevs_lock); 730 731 return mddev; 732 } 733 #endif 734 735 static struct mddev *mddev_alloc(dev_t unit) 736 { 737 struct mddev *new; 738 int error; 739 740 if (unit && MAJOR(unit) != MD_MAJOR) 741 unit &= ~((1 << MdpMinorShift) - 1); 742 743 new = kzalloc(sizeof(*new), GFP_KERNEL); 744 if (!new) 745 return ERR_PTR(-ENOMEM); 746 mddev_init(new); 747 748 spin_lock(&all_mddevs_lock); 749 if (unit) { 750 error = -EEXIST; 751 if (mddev_find_locked(unit)) 752 goto out_free_new; 753 new->unit = unit; 754 if (MAJOR(unit) == MD_MAJOR) 755 new->md_minor = MINOR(unit); 756 else 757 new->md_minor = MINOR(unit) >> MdpMinorShift; 758 new->hold_active = UNTIL_IOCTL; 759 } else { 760 error = -ENODEV; 761 new->unit = mddev_alloc_unit(); 762 if (!new->unit) 763 goto out_free_new; 764 new->md_minor = MINOR(new->unit); 765 new->hold_active = UNTIL_STOP; 766 } 767 768 list_add(&new->all_mddevs, &all_mddevs); 769 spin_unlock(&all_mddevs_lock); 770 return new; 771 out_free_new: 772 spin_unlock(&all_mddevs_lock); 773 kfree(new); 774 return ERR_PTR(error); 775 } 776 777 static void mddev_free(struct mddev *mddev) 778 { 779 spin_lock(&all_mddevs_lock); 780 list_del(&mddev->all_mddevs); 781 spin_unlock(&all_mddevs_lock); 782 783 kfree(mddev); 784 } 785 786 static const struct attribute_group md_redundancy_group; 787 788 void mddev_unlock(struct mddev *mddev) 789 { 790 if (mddev->to_remove) { 791 /* These cannot be removed under reconfig_mutex as 792 * an access to the files will try to 
take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or might
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		const struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel) == 0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
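		/*
		 * The superblock write failed: report it via md_error().  If
		 * the device is not (yet) Faulty and this was a fail-fast
		 * attempt, request a rewrite without MD_FAILFAST; that is
		 * what MD_SB_NEED_REWRITE and the -EAGAIN return from
		 * md_super_wait() are for.
		 */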
pr_err("md: %s gets error=%d\n", __func__, 924 blk_status_to_errno(bio->bi_status)); 925 md_error(mddev, rdev); 926 if (!test_bit(Faulty, &rdev->flags) 927 && (bio->bi_opf & MD_FAILFAST)) { 928 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); 929 set_bit(LastDev, &rdev->flags); 930 } 931 } else 932 clear_bit(LastDev, &rdev->flags); 933 934 if (atomic_dec_and_test(&mddev->pending_writes)) 935 wake_up(&mddev->sb_wait); 936 rdev_dec_pending(rdev, mddev); 937 bio_put(bio); 938 } 939 940 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, 941 sector_t sector, int size, struct page *page) 942 { 943 /* write first size bytes of page to sector of rdev 944 * Increment mddev->pending_writes before returning 945 * and decrement it on completion, waking up sb_wait 946 * if zero is reached. 947 * If an error occurred, call md_error 948 */ 949 struct bio *bio; 950 951 if (!page) 952 return; 953 954 if (test_bit(Faulty, &rdev->flags)) 955 return; 956 957 bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev, 958 1, 959 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA, 960 GFP_NOIO, &mddev->sync_set); 961 962 atomic_inc(&rdev->nr_pending); 963 964 bio->bi_iter.bi_sector = sector; 965 bio_add_page(bio, page, size, 0); 966 bio->bi_private = rdev; 967 bio->bi_end_io = super_written; 968 969 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && 970 test_bit(FailFast, &rdev->flags) && 971 !test_bit(LastDev, &rdev->flags)) 972 bio->bi_opf |= MD_FAILFAST; 973 974 atomic_inc(&mddev->pending_writes); 975 submit_bio(bio); 976 } 977 978 int md_super_wait(struct mddev *mddev) 979 { 980 /* wait for all superblock writes that were scheduled to complete */ 981 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); 982 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) 983 return -EAGAIN; 984 return 0; 985 } 986 987 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 988 struct page *page, blk_opf_t opf, bool metadata_op) 989 { 990 struct bio bio; 991 struct bio_vec bvec; 992 993 if (metadata_op && rdev->meta_bdev) 994 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); 995 else 996 bio_init(&bio, rdev->bdev, &bvec, 1, opf); 997 998 if (metadata_op) 999 bio.bi_iter.bi_sector = sector + rdev->sb_start; 1000 else if (rdev->mddev->reshape_position != MaxSector && 1001 (rdev->mddev->reshape_backwards == 1002 (sector >= rdev->mddev->reshape_position))) 1003 bio.bi_iter.bi_sector = sector + rdev->new_data_offset; 1004 else 1005 bio.bi_iter.bi_sector = sector + rdev->data_offset; 1006 bio_add_page(&bio, page, size, 0); 1007 1008 submit_bio_wait(&bio); 1009 1010 return !bio.bi_status; 1011 } 1012 EXPORT_SYMBOL_GPL(sync_page_io); 1013 1014 static int read_disk_sb(struct md_rdev *rdev, int size) 1015 { 1016 if (rdev->sb_loaded) 1017 return 0; 1018 1019 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) 1020 goto fail; 1021 rdev->sb_loaded = 1; 1022 return 0; 1023 1024 fail: 1025 pr_err("md: disabled device %pg, could not read superblock.\n", 1026 rdev->bdev); 1027 return -EINVAL; 1028 } 1029 1030 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1031 { 1032 return sb1->set_uuid0 == sb2->set_uuid0 && 1033 sb1->set_uuid1 == sb2->set_uuid1 && 1034 sb1->set_uuid2 == sb2->set_uuid2 && 1035 sb1->set_uuid3 == sb2->set_uuid3; 1036 } 1037 1038 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1039 { 1040 int ret; 1041 mdp_super_t *tmp1, *tmp2; 1042 1043 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 1044 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 1045 
1046 if (!tmp1 || !tmp2) { 1047 ret = 0; 1048 goto abort; 1049 } 1050 1051 *tmp1 = *sb1; 1052 *tmp2 = *sb2; 1053 1054 /* 1055 * nr_disks is not constant 1056 */ 1057 tmp1->nr_disks = 0; 1058 tmp2->nr_disks = 0; 1059 1060 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); 1061 abort: 1062 kfree(tmp1); 1063 kfree(tmp2); 1064 return ret; 1065 } 1066 1067 static u32 md_csum_fold(u32 csum) 1068 { 1069 csum = (csum & 0xffff) + (csum >> 16); 1070 return (csum & 0xffff) + (csum >> 16); 1071 } 1072 1073 static unsigned int calc_sb_csum(mdp_super_t *sb) 1074 { 1075 u64 newcsum = 0; 1076 u32 *sb32 = (u32*)sb; 1077 int i; 1078 unsigned int disk_csum, csum; 1079 1080 disk_csum = sb->sb_csum; 1081 sb->sb_csum = 0; 1082 1083 for (i = 0; i < MD_SB_BYTES/4 ; i++) 1084 newcsum += sb32[i]; 1085 csum = (newcsum & 0xffffffff) + (newcsum>>32); 1086 1087 #ifdef CONFIG_ALPHA 1088 /* This used to use csum_partial, which was wrong for several 1089 * reasons including that different results are returned on 1090 * different architectures. It isn't critical that we get exactly 1091 * the same return value as before (we always csum_fold before 1092 * testing, and that removes any differences). However as we 1093 * know that csum_partial always returned a 16bit value on 1094 * alphas, do a fold to maximise conformity to previous behaviour. 1095 */ 1096 sb->sb_csum = md_csum_fold(disk_csum); 1097 #else 1098 sb->sb_csum = disk_csum; 1099 #endif 1100 return csum; 1101 } 1102 1103 /* 1104 * Handle superblock details. 1105 * We want to be able to handle multiple superblock formats 1106 * so we have a common interface to them all, and an array of 1107 * different handlers. 1108 * We rely on user-space to write the initial superblock, and support 1109 * reading and updating of superblocks. 1110 * Interface methods are: 1111 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) 1112 * loads and validates a superblock on dev. 1113 * if refdev != NULL, compare superblocks on both devices 1114 * Return: 1115 * 0 - dev has a superblock that is compatible with refdev 1116 * 1 - dev has a superblock that is compatible and newer than refdev 1117 * so dev should be used as the refdev in future 1118 * -EINVAL superblock incompatible or invalid 1119 * -othererror e.g. -EIO 1120 * 1121 * int validate_super(struct mddev *mddev, struct md_rdev *dev) 1122 * Verify that dev is acceptable into mddev. 1123 * The first time, mddev->raid_disks will be 0, and data from 1124 * dev should be merged in. Subsequent calls check that dev 1125 * is new enough. Return 0 or -EINVAL 1126 * 1127 * void sync_super(struct mddev *mddev, struct md_rdev *dev) 1128 * Update the superblock for rdev with data in mddev 1129 * This does not write to disc. 1130 * 1131 */ 1132 1133 struct super_type { 1134 char *name; 1135 struct module *owner; 1136 int (*load_super)(struct md_rdev *rdev, 1137 struct md_rdev *refdev, 1138 int minor_version); 1139 int (*validate_super)(struct mddev *mddev, 1140 struct md_rdev *rdev); 1141 void (*sync_super)(struct mddev *mddev, 1142 struct md_rdev *rdev); 1143 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1144 sector_t num_sectors); 1145 int (*allow_new_offset)(struct md_rdev *rdev, 1146 unsigned long long new_offset); 1147 }; 1148 1149 /* 1150 * Check that the given mddev has no bitmap. 1151 * 1152 * This function is called from the run method of all personalities that do not 1153 * support bitmaps. It prints an error message and returns non-zero if mddev 1154 * has a bitmap. 
Otherwise, it returns 0. 1155 * 1156 */ 1157 int md_check_no_bitmap(struct mddev *mddev) 1158 { 1159 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) 1160 return 0; 1161 pr_warn("%s: bitmaps are not supported for %s\n", 1162 mdname(mddev), mddev->pers->name); 1163 return 1; 1164 } 1165 EXPORT_SYMBOL(md_check_no_bitmap); 1166 1167 /* 1168 * load_super for 0.90.0 1169 */ 1170 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1171 { 1172 mdp_super_t *sb; 1173 int ret; 1174 bool spare_disk = true; 1175 1176 /* 1177 * Calculate the position of the superblock (512byte sectors), 1178 * it's at the end of the disk. 1179 * 1180 * It also happens to be a multiple of 4Kb. 1181 */ 1182 rdev->sb_start = calc_dev_sboffset(rdev); 1183 1184 ret = read_disk_sb(rdev, MD_SB_BYTES); 1185 if (ret) 1186 return ret; 1187 1188 ret = -EINVAL; 1189 1190 sb = page_address(rdev->sb_page); 1191 1192 if (sb->md_magic != MD_SB_MAGIC) { 1193 pr_warn("md: invalid raid superblock magic on %pg\n", 1194 rdev->bdev); 1195 goto abort; 1196 } 1197 1198 if (sb->major_version != 0 || 1199 sb->minor_version < 90 || 1200 sb->minor_version > 91) { 1201 pr_warn("Bad version number %d.%d on %pg\n", 1202 sb->major_version, sb->minor_version, rdev->bdev); 1203 goto abort; 1204 } 1205 1206 if (sb->raid_disks <= 0) 1207 goto abort; 1208 1209 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 1210 pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev); 1211 goto abort; 1212 } 1213 1214 rdev->preferred_minor = sb->md_minor; 1215 rdev->data_offset = 0; 1216 rdev->new_data_offset = 0; 1217 rdev->sb_size = MD_SB_BYTES; 1218 rdev->badblocks.shift = -1; 1219 1220 if (sb->level == LEVEL_MULTIPATH) 1221 rdev->desc_nr = -1; 1222 else 1223 rdev->desc_nr = sb->this_disk.number; 1224 1225 /* not spare disk, or LEVEL_MULTIPATH */ 1226 if (sb->level == LEVEL_MULTIPATH || 1227 (rdev->desc_nr >= 0 && 1228 rdev->desc_nr < MD_SB_DISKS && 1229 sb->disks[rdev->desc_nr].state & 1230 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))) 1231 spare_disk = false; 1232 1233 if (!refdev) { 1234 if (!spare_disk) 1235 ret = 1; 1236 else 1237 ret = 0; 1238 } else { 1239 __u64 ev1, ev2; 1240 mdp_super_t *refsb = page_address(refdev->sb_page); 1241 if (!md_uuid_equal(refsb, sb)) { 1242 pr_warn("md: %pg has different UUID to %pg\n", 1243 rdev->bdev, refdev->bdev); 1244 goto abort; 1245 } 1246 if (!md_sb_equal(refsb, sb)) { 1247 pr_warn("md: %pg has same UUID but different superblock to %pg\n", 1248 rdev->bdev, refdev->bdev); 1249 goto abort; 1250 } 1251 ev1 = md_event(sb); 1252 ev2 = md_event(refsb); 1253 1254 if (!spare_disk && ev1 > ev2) 1255 ret = 1; 1256 else 1257 ret = 0; 1258 } 1259 rdev->sectors = rdev->sb_start; 1260 /* Limit to 4TB as metadata cannot record more than that. 1261 * (not needed for Linear and RAID0 as metadata doesn't 1262 * record this size) 1263 */ 1264 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) 1265 rdev->sectors = (sector_t)(2ULL << 32) - 2; 1266 1267 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1268 /* "this cannot possibly happen" ... 
*/ 1269 ret = -EINVAL; 1270 1271 abort: 1272 return ret; 1273 } 1274 1275 /* 1276 * validate_super for 0.90.0 1277 */ 1278 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) 1279 { 1280 mdp_disk_t *desc; 1281 mdp_super_t *sb = page_address(rdev->sb_page); 1282 __u64 ev1 = md_event(sb); 1283 1284 rdev->raid_disk = -1; 1285 clear_bit(Faulty, &rdev->flags); 1286 clear_bit(In_sync, &rdev->flags); 1287 clear_bit(Bitmap_sync, &rdev->flags); 1288 clear_bit(WriteMostly, &rdev->flags); 1289 1290 if (mddev->raid_disks == 0) { 1291 mddev->major_version = 0; 1292 mddev->minor_version = sb->minor_version; 1293 mddev->patch_version = sb->patch_version; 1294 mddev->external = 0; 1295 mddev->chunk_sectors = sb->chunk_size >> 9; 1296 mddev->ctime = sb->ctime; 1297 mddev->utime = sb->utime; 1298 mddev->level = sb->level; 1299 mddev->clevel[0] = 0; 1300 mddev->layout = sb->layout; 1301 mddev->raid_disks = sb->raid_disks; 1302 mddev->dev_sectors = ((sector_t)sb->size) * 2; 1303 mddev->events = ev1; 1304 mddev->bitmap_info.offset = 0; 1305 mddev->bitmap_info.space = 0; 1306 /* bitmap can use 60 K after the 4K superblocks */ 1307 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1308 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 1309 mddev->reshape_backwards = 0; 1310 1311 if (mddev->minor_version >= 91) { 1312 mddev->reshape_position = sb->reshape_position; 1313 mddev->delta_disks = sb->delta_disks; 1314 mddev->new_level = sb->new_level; 1315 mddev->new_layout = sb->new_layout; 1316 mddev->new_chunk_sectors = sb->new_chunk >> 9; 1317 if (mddev->delta_disks < 0) 1318 mddev->reshape_backwards = 1; 1319 } else { 1320 mddev->reshape_position = MaxSector; 1321 mddev->delta_disks = 0; 1322 mddev->new_level = mddev->level; 1323 mddev->new_layout = mddev->layout; 1324 mddev->new_chunk_sectors = mddev->chunk_sectors; 1325 } 1326 if (mddev->level == 0) 1327 mddev->layout = -1; 1328 1329 if (sb->state & (1<<MD_SB_CLEAN)) 1330 mddev->recovery_cp = MaxSector; 1331 else { 1332 if (sb->events_hi == sb->cp_events_hi && 1333 sb->events_lo == sb->cp_events_lo) { 1334 mddev->recovery_cp = sb->recovery_cp; 1335 } else 1336 mddev->recovery_cp = 0; 1337 } 1338 1339 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 1340 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 1341 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 1342 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 1343 1344 mddev->max_disks = MD_SB_DISKS; 1345 1346 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 1347 mddev->bitmap_info.file == NULL) { 1348 mddev->bitmap_info.offset = 1349 mddev->bitmap_info.default_offset; 1350 mddev->bitmap_info.space = 1351 mddev->bitmap_info.default_space; 1352 } 1353 1354 } else if (mddev->pers == NULL) { 1355 /* Insist on good event counter while assembling, except 1356 * for spares (which don't need an event count) */ 1357 ++ev1; 1358 if (sb->disks[rdev->desc_nr].state & ( 1359 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) 1360 if (ev1 < mddev->events) 1361 return -EINVAL; 1362 } else if (mddev->bitmap) { 1363 /* if adding to array with a bitmap, then we can accept an 1364 * older device ... but not too old. 
1365 */ 1366 if (ev1 < mddev->bitmap->events_cleared) 1367 return 0; 1368 if (ev1 < mddev->events) 1369 set_bit(Bitmap_sync, &rdev->flags); 1370 } else { 1371 if (ev1 < mddev->events) 1372 /* just a hot-add of a new device, leave raid_disk at -1 */ 1373 return 0; 1374 } 1375 1376 if (mddev->level != LEVEL_MULTIPATH) { 1377 desc = sb->disks + rdev->desc_nr; 1378 1379 if (desc->state & (1<<MD_DISK_FAULTY)) 1380 set_bit(Faulty, &rdev->flags); 1381 else if (desc->state & (1<<MD_DISK_SYNC) /* && 1382 desc->raid_disk < mddev->raid_disks */) { 1383 set_bit(In_sync, &rdev->flags); 1384 rdev->raid_disk = desc->raid_disk; 1385 rdev->saved_raid_disk = desc->raid_disk; 1386 } else if (desc->state & (1<<MD_DISK_ACTIVE)) { 1387 /* active but not in sync implies recovery up to 1388 * reshape position. We don't know exactly where 1389 * that is, so set to zero for now */ 1390 if (mddev->minor_version >= 91) { 1391 rdev->recovery_offset = 0; 1392 rdev->raid_disk = desc->raid_disk; 1393 } 1394 } 1395 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 1396 set_bit(WriteMostly, &rdev->flags); 1397 if (desc->state & (1<<MD_DISK_FAILFAST)) 1398 set_bit(FailFast, &rdev->flags); 1399 } else /* MULTIPATH are always insync */ 1400 set_bit(In_sync, &rdev->flags); 1401 return 0; 1402 } 1403 1404 /* 1405 * sync_super for 0.90.0 1406 */ 1407 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) 1408 { 1409 mdp_super_t *sb; 1410 struct md_rdev *rdev2; 1411 int next_spare = mddev->raid_disks; 1412 1413 /* make rdev->sb match mddev data.. 1414 * 1415 * 1/ zero out disks 1416 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 1417 * 3/ any empty disks < next_spare become removed 1418 * 1419 * disks[0] gets initialised to REMOVED because 1420 * we cannot be sure from other fields if it has 1421 * been initialised or not. 
1422 */ 1423 int i; 1424 int active=0, working=0,failed=0,spare=0,nr_disks=0; 1425 1426 rdev->sb_size = MD_SB_BYTES; 1427 1428 sb = page_address(rdev->sb_page); 1429 1430 memset(sb, 0, sizeof(*sb)); 1431 1432 sb->md_magic = MD_SB_MAGIC; 1433 sb->major_version = mddev->major_version; 1434 sb->patch_version = mddev->patch_version; 1435 sb->gvalid_words = 0; /* ignored */ 1436 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 1437 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 1438 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 1439 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 1440 1441 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 1442 sb->level = mddev->level; 1443 sb->size = mddev->dev_sectors / 2; 1444 sb->raid_disks = mddev->raid_disks; 1445 sb->md_minor = mddev->md_minor; 1446 sb->not_persistent = 0; 1447 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 1448 sb->state = 0; 1449 sb->events_hi = (mddev->events>>32); 1450 sb->events_lo = (u32)mddev->events; 1451 1452 if (mddev->reshape_position == MaxSector) 1453 sb->minor_version = 90; 1454 else { 1455 sb->minor_version = 91; 1456 sb->reshape_position = mddev->reshape_position; 1457 sb->new_level = mddev->new_level; 1458 sb->delta_disks = mddev->delta_disks; 1459 sb->new_layout = mddev->new_layout; 1460 sb->new_chunk = mddev->new_chunk_sectors << 9; 1461 } 1462 mddev->minor_version = sb->minor_version; 1463 if (mddev->in_sync) 1464 { 1465 sb->recovery_cp = mddev->recovery_cp; 1466 sb->cp_events_hi = (mddev->events>>32); 1467 sb->cp_events_lo = (u32)mddev->events; 1468 if (mddev->recovery_cp == MaxSector) 1469 sb->state = (1<< MD_SB_CLEAN); 1470 } else 1471 sb->recovery_cp = 0; 1472 1473 sb->layout = mddev->layout; 1474 sb->chunk_size = mddev->chunk_sectors << 9; 1475 1476 if (mddev->bitmap && mddev->bitmap_info.file == NULL) 1477 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1478 1479 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1480 rdev_for_each(rdev2, mddev) { 1481 mdp_disk_t *d; 1482 int desc_nr; 1483 int is_active = test_bit(In_sync, &rdev2->flags); 1484 1485 if (rdev2->raid_disk >= 0 && 1486 sb->minor_version >= 91) 1487 /* we have nowhere to store the recovery_offset, 1488 * but if it is not below the reshape_position, 1489 * we can piggy-back on that. 
1490 */ 1491 is_active = 1; 1492 if (rdev2->raid_disk < 0 || 1493 test_bit(Faulty, &rdev2->flags)) 1494 is_active = 0; 1495 if (is_active) 1496 desc_nr = rdev2->raid_disk; 1497 else 1498 desc_nr = next_spare++; 1499 rdev2->desc_nr = desc_nr; 1500 d = &sb->disks[rdev2->desc_nr]; 1501 nr_disks++; 1502 d->number = rdev2->desc_nr; 1503 d->major = MAJOR(rdev2->bdev->bd_dev); 1504 d->minor = MINOR(rdev2->bdev->bd_dev); 1505 if (is_active) 1506 d->raid_disk = rdev2->raid_disk; 1507 else 1508 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1509 if (test_bit(Faulty, &rdev2->flags)) 1510 d->state = (1<<MD_DISK_FAULTY); 1511 else if (is_active) { 1512 d->state = (1<<MD_DISK_ACTIVE); 1513 if (test_bit(In_sync, &rdev2->flags)) 1514 d->state |= (1<<MD_DISK_SYNC); 1515 active++; 1516 working++; 1517 } else { 1518 d->state = 0; 1519 spare++; 1520 working++; 1521 } 1522 if (test_bit(WriteMostly, &rdev2->flags)) 1523 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1524 if (test_bit(FailFast, &rdev2->flags)) 1525 d->state |= (1<<MD_DISK_FAILFAST); 1526 } 1527 /* now set the "removed" and "faulty" bits on any missing devices */ 1528 for (i=0 ; i < mddev->raid_disks ; i++) { 1529 mdp_disk_t *d = &sb->disks[i]; 1530 if (d->state == 0 && d->number == 0) { 1531 d->number = i; 1532 d->raid_disk = i; 1533 d->state = (1<<MD_DISK_REMOVED); 1534 d->state |= (1<<MD_DISK_FAULTY); 1535 failed++; 1536 } 1537 } 1538 sb->nr_disks = nr_disks; 1539 sb->active_disks = active; 1540 sb->working_disks = working; 1541 sb->failed_disks = failed; 1542 sb->spare_disks = spare; 1543 1544 sb->this_disk = sb->disks[rdev->desc_nr]; 1545 sb->sb_csum = calc_sb_csum(sb); 1546 } 1547 1548 /* 1549 * rdev_size_change for 0.90.0 1550 */ 1551 static unsigned long long 1552 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1553 { 1554 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1555 return 0; /* component must fit device */ 1556 if (rdev->mddev->bitmap_info.offset) 1557 return 0; /* can't move bitmap */ 1558 rdev->sb_start = calc_dev_sboffset(rdev); 1559 if (!num_sectors || num_sectors > rdev->sb_start) 1560 num_sectors = rdev->sb_start; 1561 /* Limit to 4TB as metadata cannot record more than that. 1562 * 4TB == 2^32 KB, or 2*2^32 sectors. 
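 * (2*2^32 sectors is 2^33; stopping 2 sectors short of that keeps the
 * size, recorded in KB, within the 32-bit fields of the v0.90 superblock.)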
1563 */ 1564 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1565 num_sectors = (sector_t)(2ULL << 32) - 2; 1566 do { 1567 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1568 rdev->sb_page); 1569 } while (md_super_wait(rdev->mddev) < 0); 1570 return num_sectors; 1571 } 1572 1573 static int 1574 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1575 { 1576 /* non-zero offset changes not possible with v0.90 */ 1577 return new_offset == 0; 1578 } 1579 1580 /* 1581 * version 1 superblock 1582 */ 1583 1584 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) 1585 { 1586 __le32 disk_csum; 1587 u32 csum; 1588 unsigned long long newcsum; 1589 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1590 __le32 *isuper = (__le32*)sb; 1591 1592 disk_csum = sb->sb_csum; 1593 sb->sb_csum = 0; 1594 newcsum = 0; 1595 for (; size >= 4; size -= 4) 1596 newcsum += le32_to_cpu(*isuper++); 1597 1598 if (size == 2) 1599 newcsum += le16_to_cpu(*(__le16*) isuper); 1600 1601 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1602 sb->sb_csum = disk_csum; 1603 return cpu_to_le32(csum); 1604 } 1605 1606 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1607 { 1608 struct mdp_superblock_1 *sb; 1609 int ret; 1610 sector_t sb_start; 1611 sector_t sectors; 1612 int bmask; 1613 bool spare_disk = true; 1614 1615 /* 1616 * Calculate the position of the superblock in 512byte sectors. 1617 * It is always aligned to a 4K boundary and 1618 * depeding on minor_version, it can be: 1619 * 0: At least 8K, but less than 12K, from end of device 1620 * 1: At start of device 1621 * 2: 4K from start of device. 1622 */ 1623 switch(minor_version) { 1624 case 0: 1625 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2; 1626 sb_start &= ~(sector_t)(4*2-1); 1627 break; 1628 case 1: 1629 sb_start = 0; 1630 break; 1631 case 2: 1632 sb_start = 8; 1633 break; 1634 default: 1635 return -EINVAL; 1636 } 1637 rdev->sb_start = sb_start; 1638 1639 /* superblock is rarely larger than 1K, but it can be larger, 1640 * and it is safe to read 4k, so we do that 1641 */ 1642 ret = read_disk_sb(rdev, 4096); 1643 if (ret) return ret; 1644 1645 sb = page_address(rdev->sb_page); 1646 1647 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1648 sb->major_version != cpu_to_le32(1) || 1649 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1650 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1651 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1652 return -EINVAL; 1653 1654 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1655 pr_warn("md: invalid superblock checksum on %pg\n", 1656 rdev->bdev); 1657 return -EINVAL; 1658 } 1659 if (le64_to_cpu(sb->data_size) < 10) { 1660 pr_warn("md: data_size too small on %pg\n", 1661 rdev->bdev); 1662 return -EINVAL; 1663 } 1664 if (sb->pad0 || 1665 sb->pad3[0] || 1666 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) 1667 /* Some padding is non-zero, might be a new feature */ 1668 return -EINVAL; 1669 1670 rdev->preferred_minor = 0xffff; 1671 rdev->data_offset = le64_to_cpu(sb->data_offset); 1672 rdev->new_data_offset = rdev->data_offset; 1673 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1674 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1675 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1676 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1677 1678 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1679 bmask = 
queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1680 if (rdev->sb_size & bmask) 1681 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1682 1683 if (minor_version 1684 && rdev->data_offset < sb_start + (rdev->sb_size/512)) 1685 return -EINVAL; 1686 if (minor_version 1687 && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) 1688 return -EINVAL; 1689 1690 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) 1691 rdev->desc_nr = -1; 1692 else 1693 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1694 1695 if (!rdev->bb_page) { 1696 rdev->bb_page = alloc_page(GFP_KERNEL); 1697 if (!rdev->bb_page) 1698 return -ENOMEM; 1699 } 1700 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) && 1701 rdev->badblocks.count == 0) { 1702 /* need to load the bad block list. 1703 * Currently we limit it to one page. 1704 */ 1705 s32 offset; 1706 sector_t bb_sector; 1707 __le64 *bbp; 1708 int i; 1709 int sectors = le16_to_cpu(sb->bblog_size); 1710 if (sectors > (PAGE_SIZE / 512)) 1711 return -EINVAL; 1712 offset = le32_to_cpu(sb->bblog_offset); 1713 if (offset == 0) 1714 return -EINVAL; 1715 bb_sector = (long long)offset; 1716 if (!sync_page_io(rdev, bb_sector, sectors << 9, 1717 rdev->bb_page, REQ_OP_READ, true)) 1718 return -EIO; 1719 bbp = (__le64 *)page_address(rdev->bb_page); 1720 rdev->badblocks.shift = sb->bblog_shift; 1721 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { 1722 u64 bb = le64_to_cpu(*bbp); 1723 int count = bb & (0x3ff); 1724 u64 sector = bb >> 10; 1725 sector <<= sb->bblog_shift; 1726 count <<= sb->bblog_shift; 1727 if (bb + 1 == 0) 1728 break; 1729 if (badblocks_set(&rdev->badblocks, sector, count, 1)) 1730 return -EINVAL; 1731 } 1732 } else if (sb->bblog_offset != 0) 1733 rdev->badblocks.shift = 0; 1734 1735 if ((le32_to_cpu(sb->feature_map) & 1736 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) { 1737 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset); 1738 rdev->ppl.size = le16_to_cpu(sb->ppl.size); 1739 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; 1740 } 1741 1742 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && 1743 sb->level != 0) 1744 return -EINVAL; 1745 1746 /* not spare disk, or LEVEL_MULTIPATH */ 1747 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) || 1748 (rdev->desc_nr >= 0 && 1749 rdev->desc_nr < le32_to_cpu(sb->max_dev) && 1750 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1751 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))) 1752 spare_disk = false; 1753 1754 if (!refdev) { 1755 if (!spare_disk) 1756 ret = 1; 1757 else 1758 ret = 0; 1759 } else { 1760 __u64 ev1, ev2; 1761 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); 1762 1763 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1764 sb->level != refsb->level || 1765 sb->layout != refsb->layout || 1766 sb->chunksize != refsb->chunksize) { 1767 pr_warn("md: %pg has strangely different superblock to %pg\n", 1768 rdev->bdev, 1769 refdev->bdev); 1770 return -EINVAL; 1771 } 1772 ev1 = le64_to_cpu(sb->events); 1773 ev2 = le64_to_cpu(refsb->events); 1774 1775 if (!spare_disk && ev1 > ev2) 1776 ret = 1; 1777 else 1778 ret = 0; 1779 } 1780 if (minor_version) 1781 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; 1782 else 1783 sectors = rdev->sb_start; 1784 if (sectors < le64_to_cpu(sb->data_size)) 1785 return -EINVAL; 1786 rdev->sectors = le64_to_cpu(sb->data_size); 1787 return ret; 1788 } 1789 1790 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) 1791 { 1792 struct mdp_superblock_1 *sb = 
page_address(rdev->sb_page); 1793 __u64 ev1 = le64_to_cpu(sb->events); 1794 1795 rdev->raid_disk = -1; 1796 clear_bit(Faulty, &rdev->flags); 1797 clear_bit(In_sync, &rdev->flags); 1798 clear_bit(Bitmap_sync, &rdev->flags); 1799 clear_bit(WriteMostly, &rdev->flags); 1800 1801 if (mddev->raid_disks == 0) { 1802 mddev->major_version = 1; 1803 mddev->patch_version = 0; 1804 mddev->external = 0; 1805 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); 1806 mddev->ctime = le64_to_cpu(sb->ctime); 1807 mddev->utime = le64_to_cpu(sb->utime); 1808 mddev->level = le32_to_cpu(sb->level); 1809 mddev->clevel[0] = 0; 1810 mddev->layout = le32_to_cpu(sb->layout); 1811 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1812 mddev->dev_sectors = le64_to_cpu(sb->size); 1813 mddev->events = ev1; 1814 mddev->bitmap_info.offset = 0; 1815 mddev->bitmap_info.space = 0; 1816 /* Default location for bitmap is 1K after superblock 1817 * using 3K - total of 4K 1818 */ 1819 mddev->bitmap_info.default_offset = 1024 >> 9; 1820 mddev->bitmap_info.default_space = (4096-1024) >> 9; 1821 mddev->reshape_backwards = 0; 1822 1823 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1824 memcpy(mddev->uuid, sb->set_uuid, 16); 1825 1826 mddev->max_disks = (4096-256)/2; 1827 1828 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1829 mddev->bitmap_info.file == NULL) { 1830 mddev->bitmap_info.offset = 1831 (__s32)le32_to_cpu(sb->bitmap_offset); 1832 /* Metadata doesn't record how much space is available. 1833 * For 1.0, we assume we can use up to the superblock 1834 * if before, else to 4K beyond superblock. 1835 * For others, assume no change is possible. 1836 */ 1837 if (mddev->minor_version > 0) 1838 mddev->bitmap_info.space = 0; 1839 else if (mddev->bitmap_info.offset > 0) 1840 mddev->bitmap_info.space = 1841 8 - mddev->bitmap_info.offset; 1842 else 1843 mddev->bitmap_info.space = 1844 -mddev->bitmap_info.offset; 1845 } 1846 1847 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1848 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1849 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1850 mddev->new_level = le32_to_cpu(sb->new_level); 1851 mddev->new_layout = le32_to_cpu(sb->new_layout); 1852 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); 1853 if (mddev->delta_disks < 0 || 1854 (mddev->delta_disks == 0 && 1855 (le32_to_cpu(sb->feature_map) 1856 & MD_FEATURE_RESHAPE_BACKWARDS))) 1857 mddev->reshape_backwards = 1; 1858 } else { 1859 mddev->reshape_position = MaxSector; 1860 mddev->delta_disks = 0; 1861 mddev->new_level = mddev->level; 1862 mddev->new_layout = mddev->layout; 1863 mddev->new_chunk_sectors = mddev->chunk_sectors; 1864 } 1865 1866 if (mddev->level == 0 && 1867 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) 1868 mddev->layout = -1; 1869 1870 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) 1871 set_bit(MD_HAS_JOURNAL, &mddev->flags); 1872 1873 if (le32_to_cpu(sb->feature_map) & 1874 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) { 1875 if (le32_to_cpu(sb->feature_map) & 1876 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL)) 1877 return -EINVAL; 1878 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) && 1879 (le32_to_cpu(sb->feature_map) & 1880 MD_FEATURE_MULTIPLE_PPLS)) 1881 return -EINVAL; 1882 set_bit(MD_HAS_PPL, &mddev->flags); 1883 } 1884 } else if (mddev->pers == NULL) { 1885 /* Insist of good event counter while assembling, except for 1886 * spares (which don't need an event count) */ 1887 ++ev1; 1888 if (rdev->desc_nr >= 0 && 1889 rdev->desc_nr 
< le32_to_cpu(sb->max_dev) && 1890 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1891 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) 1892 if (ev1 < mddev->events) 1893 return -EINVAL; 1894 } else if (mddev->bitmap) { 1895 /* If adding to array with a bitmap, then we can accept an 1896 * older device, but not too old. 1897 */ 1898 if (ev1 < mddev->bitmap->events_cleared) 1899 return 0; 1900 if (ev1 < mddev->events) 1901 set_bit(Bitmap_sync, &rdev->flags); 1902 } else { 1903 if (ev1 < mddev->events) 1904 /* just a hot-add of a new device, leave raid_disk at -1 */ 1905 return 0; 1906 } 1907 if (mddev->level != LEVEL_MULTIPATH) { 1908 int role; 1909 if (rdev->desc_nr < 0 || 1910 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { 1911 role = MD_DISK_ROLE_SPARE; 1912 rdev->desc_nr = -1; 1913 } else 1914 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1915 switch(role) { 1916 case MD_DISK_ROLE_SPARE: /* spare */ 1917 break; 1918 case MD_DISK_ROLE_FAULTY: /* faulty */ 1919 set_bit(Faulty, &rdev->flags); 1920 break; 1921 case MD_DISK_ROLE_JOURNAL: /* journal device */ 1922 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { 1923 /* journal device without journal feature */ 1924 pr_warn("md: journal device provided without journal feature, ignoring the device\n"); 1925 return -EINVAL; 1926 } 1927 set_bit(Journal, &rdev->flags); 1928 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1929 rdev->raid_disk = 0; 1930 break; 1931 default: 1932 rdev->saved_raid_disk = role; 1933 if ((le32_to_cpu(sb->feature_map) & 1934 MD_FEATURE_RECOVERY_OFFSET)) { 1935 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 1936 if (!(le32_to_cpu(sb->feature_map) & 1937 MD_FEATURE_RECOVERY_BITMAP)) 1938 rdev->saved_raid_disk = -1; 1939 } else { 1940 /* 1941 * If the array is FROZEN, then the device can't 1942 * be in_sync with rest of array. 1943 */ 1944 if (!test_bit(MD_RECOVERY_FROZEN, 1945 &mddev->recovery)) 1946 set_bit(In_sync, &rdev->flags); 1947 } 1948 rdev->raid_disk = role; 1949 break; 1950 } 1951 if (sb->devflags & WriteMostly1) 1952 set_bit(WriteMostly, &rdev->flags); 1953 if (sb->devflags & FailFast1) 1954 set_bit(FailFast, &rdev->flags); 1955 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) 1956 set_bit(Replacement, &rdev->flags); 1957 } else /* MULTIPATH are always insync */ 1958 set_bit(In_sync, &rdev->flags); 1959 1960 return 0; 1961 } 1962 1963 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 1964 { 1965 struct mdp_superblock_1 *sb; 1966 struct md_rdev *rdev2; 1967 int max_dev, i; 1968 /* make rdev->sb match mddev and rdev data. 
*/ 1969 1970 sb = page_address(rdev->sb_page); 1971 1972 sb->feature_map = 0; 1973 sb->pad0 = 0; 1974 sb->recovery_offset = cpu_to_le64(0); 1975 memset(sb->pad3, 0, sizeof(sb->pad3)); 1976 1977 sb->utime = cpu_to_le64((__u64)mddev->utime); 1978 sb->events = cpu_to_le64(mddev->events); 1979 if (mddev->in_sync) 1980 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1981 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 1982 sb->resync_offset = cpu_to_le64(MaxSector); 1983 else 1984 sb->resync_offset = cpu_to_le64(0); 1985 1986 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1987 1988 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1989 sb->size = cpu_to_le64(mddev->dev_sectors); 1990 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1991 sb->level = cpu_to_le32(mddev->level); 1992 sb->layout = cpu_to_le32(mddev->layout); 1993 if (test_bit(FailFast, &rdev->flags)) 1994 sb->devflags |= FailFast1; 1995 else 1996 sb->devflags &= ~FailFast1; 1997 1998 if (test_bit(WriteMostly, &rdev->flags)) 1999 sb->devflags |= WriteMostly1; 2000 else 2001 sb->devflags &= ~WriteMostly1; 2002 sb->data_offset = cpu_to_le64(rdev->data_offset); 2003 sb->data_size = cpu_to_le64(rdev->sectors); 2004 2005 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 2006 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 2007 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 2008 } 2009 2010 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && 2011 !test_bit(In_sync, &rdev->flags)) { 2012 sb->feature_map |= 2013 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 2014 sb->recovery_offset = 2015 cpu_to_le64(rdev->recovery_offset); 2016 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 2017 sb->feature_map |= 2018 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 2019 } 2020 /* Note: recovery_offset and journal_tail share space */ 2021 if (test_bit(Journal, &rdev->flags)) 2022 sb->journal_tail = cpu_to_le64(rdev->journal_tail); 2023 if (test_bit(Replacement, &rdev->flags)) 2024 sb->feature_map |= 2025 cpu_to_le32(MD_FEATURE_REPLACEMENT); 2026 2027 if (mddev->reshape_position != MaxSector) { 2028 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 2029 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 2030 sb->new_layout = cpu_to_le32(mddev->new_layout); 2031 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 2032 sb->new_level = cpu_to_le32(mddev->new_level); 2033 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 2034 if (mddev->delta_disks == 0 && 2035 mddev->reshape_backwards) 2036 sb->feature_map 2037 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 2038 if (rdev->new_data_offset != rdev->data_offset) { 2039 sb->feature_map 2040 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 2041 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 2042 - rdev->data_offset)); 2043 } 2044 } 2045 2046 if (mddev_is_clustered(mddev)) 2047 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); 2048 2049 if (rdev->badblocks.count == 0) 2050 /* Nothing to do for bad blocks*/ ; 2051 else if (sb->bblog_offset == 0) 2052 /* Cannot record bad blocks on this device */ 2053 md_error(mddev, rdev); 2054 else { 2055 struct badblocks *bb = &rdev->badblocks; 2056 __le64 *bbp = (__le64 *)page_address(rdev->bb_page); 2057 u64 *p = bb->page; 2058 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 2059 if (bb->changed) { 2060 unsigned seq; 2061 2062 retry: 2063 seq = read_seqbegin(&bb->lock); 2064 2065 memset(bbp, 0xff, PAGE_SIZE); 2066 2067 for (i = 0 ; i < bb->count ; i++) { 2068 u64 
internal_bb = p[i]; 2069 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 2070 | BB_LEN(internal_bb)); 2071 bbp[i] = cpu_to_le64(store_bb); 2072 } 2073 bb->changed = 0; 2074 if (read_seqretry(&bb->lock, seq)) 2075 goto retry; 2076 2077 bb->sector = (rdev->sb_start + 2078 (int)le32_to_cpu(sb->bblog_offset)); 2079 bb->size = le16_to_cpu(sb->bblog_size); 2080 } 2081 } 2082 2083 max_dev = 0; 2084 rdev_for_each(rdev2, mddev) 2085 if (rdev2->desc_nr+1 > max_dev) 2086 max_dev = rdev2->desc_nr+1; 2087 2088 if (max_dev > le32_to_cpu(sb->max_dev)) { 2089 int bmask; 2090 sb->max_dev = cpu_to_le32(max_dev); 2091 rdev->sb_size = max_dev * 2 + 256; 2092 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 2093 if (rdev->sb_size & bmask) 2094 rdev->sb_size = (rdev->sb_size | bmask) + 1; 2095 } else 2096 max_dev = le32_to_cpu(sb->max_dev); 2097 2098 for (i=0; i<max_dev;i++) 2099 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2100 2101 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 2102 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 2103 2104 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 2105 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 2106 sb->feature_map |= 2107 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 2108 else 2109 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 2110 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 2111 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 2112 } 2113 2114 rdev_for_each(rdev2, mddev) { 2115 i = rdev2->desc_nr; 2116 if (test_bit(Faulty, &rdev2->flags)) 2117 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 2118 else if (test_bit(In_sync, &rdev2->flags)) 2119 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2120 else if (test_bit(Journal, &rdev2->flags)) 2121 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 2122 else if (rdev2->raid_disk >= 0) 2123 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2124 else 2125 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2126 } 2127 2128 sb->sb_csum = calc_sb_1_csum(sb); 2129 } 2130 2131 static sector_t super_1_choose_bm_space(sector_t dev_size) 2132 { 2133 sector_t bm_space; 2134 2135 /* if the device is bigger than 8Gig, save 64k for bitmap 2136 * usage, if bigger than 200Gig, save 128k 2137 */ 2138 if (dev_size < 64*2) 2139 bm_space = 0; 2140 else if (dev_size - 64*2 >= 200*1024*1024*2) 2141 bm_space = 128*2; 2142 else if (dev_size - 4*2 > 8*1024*1024*2) 2143 bm_space = 64*2; 2144 else 2145 bm_space = 4*2; 2146 return bm_space; 2147 } 2148 2149 static unsigned long long 2150 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 2151 { 2152 struct mdp_superblock_1 *sb; 2153 sector_t max_sectors; 2154 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 2155 return 0; /* component must fit device */ 2156 if (rdev->data_offset != rdev->new_data_offset) 2157 return 0; /* too confusing */ 2158 if (rdev->sb_start < rdev->data_offset) { 2159 /* minor versions 1 and 2; superblock before data */ 2160 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; 2161 if (!num_sectors || num_sectors > max_sectors) 2162 num_sectors = max_sectors; 2163 } else if (rdev->mddev->bitmap_info.offset) { 2164 /* minor version 0 with bitmap we can't move */ 2165 return 0; 2166 } else { 2167 /* minor version 0; superblock after data */ 2168 sector_t sb_start, bm_space; 2169 sector_t dev_size = bdev_nr_sectors(rdev->bdev); 2170 2171 /* 8K is for superblock */ 2172 sb_start = dev_size - 8*2; 2173 sb_start &= ~(sector_t)(4*2 - 1); 2174 2175 bm_space = super_1_choose_bm_space(dev_size); 2176 2177 /* Space that 
can be used to store data must leave room for the 2178 * superblock, the bitmap space and the bad block space (4K) 2179 */ 2180 max_sectors = sb_start - bm_space - 4*2; 2181 2182 if (!num_sectors || num_sectors > max_sectors) 2183 num_sectors = max_sectors; 2184 rdev->sb_start = sb_start; 2185 } 2186 sb = page_address(rdev->sb_page); 2187 sb->data_size = cpu_to_le64(num_sectors); 2188 sb->super_offset = cpu_to_le64(rdev->sb_start); 2189 sb->sb_csum = calc_sb_1_csum(sb); 2190 do { 2191 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 2192 rdev->sb_page); 2193 } while (md_super_wait(rdev->mddev) < 0); 2194 return num_sectors; 2195 2196 } 2197 2198 static int 2199 super_1_allow_new_offset(struct md_rdev *rdev, 2200 unsigned long long new_offset) 2201 { 2202 /* All necessary checks on new >= old have been done */ 2203 struct bitmap *bitmap; 2204 if (new_offset >= rdev->data_offset) 2205 return 1; 2206 2207 /* with 1.0 metadata, there is no metadata to tread on 2208 * so we can always move back */ 2209 if (rdev->mddev->minor_version == 0) 2210 return 1; 2211 2212 /* otherwise we must be sure not to step on 2213 * any metadata, so stay: 2214 * 36K beyond start of superblock 2215 * beyond end of badblocks 2216 * beyond write-intent bitmap 2217 */ 2218 if (rdev->sb_start + (32+4)*2 > new_offset) 2219 return 0; 2220 bitmap = rdev->mddev->bitmap; 2221 if (bitmap && !rdev->mddev->bitmap_info.file && 2222 rdev->sb_start + rdev->mddev->bitmap_info.offset + 2223 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) 2224 return 0; 2225 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2226 return 0; 2227 2228 return 1; 2229 } 2230 2231 static struct super_type super_types[] = { 2232 [0] = { 2233 .name = "0.90.0", 2234 .owner = THIS_MODULE, 2235 .load_super = super_90_load, 2236 .validate_super = super_90_validate, 2237 .sync_super = super_90_sync, 2238 .rdev_size_change = super_90_rdev_size_change, 2239 .allow_new_offset = super_90_allow_new_offset, 2240 }, 2241 [1] = { 2242 .name = "md-1", 2243 .owner = THIS_MODULE, 2244 .load_super = super_1_load, 2245 .validate_super = super_1_validate, 2246 .sync_super = super_1_sync, 2247 .rdev_size_change = super_1_rdev_size_change, 2248 .allow_new_offset = super_1_allow_new_offset, 2249 }, 2250 }; 2251 2252 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2253 { 2254 if (mddev->sync_super) { 2255 mddev->sync_super(mddev, rdev); 2256 return; 2257 } 2258 2259 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2260 2261 super_types[mddev->major_version].sync_super(mddev, rdev); 2262 } 2263 2264 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2265 { 2266 struct md_rdev *rdev, *rdev2; 2267 2268 rcu_read_lock(); 2269 rdev_for_each_rcu(rdev, mddev1) { 2270 if (test_bit(Faulty, &rdev->flags) || 2271 test_bit(Journal, &rdev->flags) || 2272 rdev->raid_disk == -1) 2273 continue; 2274 rdev_for_each_rcu(rdev2, mddev2) { 2275 if (test_bit(Faulty, &rdev2->flags) || 2276 test_bit(Journal, &rdev2->flags) || 2277 rdev2->raid_disk == -1) 2278 continue; 2279 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) { 2280 rcu_read_unlock(); 2281 return 1; 2282 } 2283 } 2284 } 2285 rcu_read_unlock(); 2286 return 0; 2287 } 2288 2289 static LIST_HEAD(pending_raid_disks); 2290 2291 /* 2292 * Try to register data integrity profile for an mddev 2293 * 2294 * This is called when an array is started and after a disk has been kicked 2295 * from the array.
It only succeeds if all working and active component devices 2296 * are integrity capable with matching profiles. 2297 */ 2298 int md_integrity_register(struct mddev *mddev) 2299 { 2300 struct md_rdev *rdev, *reference = NULL; 2301 2302 if (list_empty(&mddev->disks)) 2303 return 0; /* nothing to do */ 2304 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 2305 return 0; /* shouldn't register, or already is */ 2306 rdev_for_each(rdev, mddev) { 2307 /* skip spares and non-functional disks */ 2308 if (test_bit(Faulty, &rdev->flags)) 2309 continue; 2310 if (rdev->raid_disk < 0) 2311 continue; 2312 if (!reference) { 2313 /* Use the first rdev as the reference */ 2314 reference = rdev; 2315 continue; 2316 } 2317 /* does this rdev's profile match the reference profile? */ 2318 if (blk_integrity_compare(reference->bdev->bd_disk, 2319 rdev->bdev->bd_disk) < 0) 2320 return -EINVAL; 2321 } 2322 if (!reference || !bdev_get_integrity(reference->bdev)) 2323 return 0; 2324 /* 2325 * All component devices are integrity capable and have matching 2326 * profiles, register the common profile for the md device. 2327 */ 2328 blk_integrity_register(mddev->gendisk, 2329 bdev_get_integrity(reference->bdev)); 2330 2331 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2332 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || 2333 (mddev->level != 1 && mddev->level != 10 && 2334 bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) { 2335 /* 2336 * No need to handle the failure of bioset_integrity_create, 2337 * because the function is called by md_run() -> pers->run(), 2338 * md_run calls bioset_exit -> bioset_integrity_free in case 2339 * of failure case. 2340 */ 2341 pr_err("md: failed to create integrity pool for %s\n", 2342 mdname(mddev)); 2343 return -EINVAL; 2344 } 2345 return 0; 2346 } 2347 EXPORT_SYMBOL(md_integrity_register); 2348 2349 /* 2350 * Attempt to add an rdev, but only if it is consistent with the current 2351 * integrity profile 2352 */ 2353 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2354 { 2355 struct blk_integrity *bi_mddev; 2356 2357 if (!mddev->gendisk) 2358 return 0; 2359 2360 bi_mddev = blk_get_integrity(mddev->gendisk); 2361 2362 if (!bi_mddev) /* nothing to do */ 2363 return 0; 2364 2365 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2366 pr_err("%s: incompatible integrity profile for %pg\n", 2367 mdname(mddev), rdev->bdev); 2368 return -ENXIO; 2369 } 2370 2371 return 0; 2372 } 2373 EXPORT_SYMBOL(md_integrity_add_rdev); 2374 2375 static bool rdev_read_only(struct md_rdev *rdev) 2376 { 2377 return bdev_read_only(rdev->bdev) || 2378 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev)); 2379 } 2380 2381 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2382 { 2383 char b[BDEVNAME_SIZE]; 2384 int err; 2385 2386 /* prevent duplicates */ 2387 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2388 return -EEXIST; 2389 2390 if (rdev_read_only(rdev) && mddev->pers) 2391 return -EROFS; 2392 2393 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2394 if (!test_bit(Journal, &rdev->flags) && 2395 rdev->sectors && 2396 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2397 if (mddev->pers) { 2398 /* Cannot change size, so fail 2399 * If mddev->level <= 0, then we don't care 2400 * about aligning sizes (e.g. linear) 2401 */ 2402 if (mddev->level > 0) 2403 return -ENOSPC; 2404 } else 2405 mddev->dev_sectors = rdev->sectors; 2406 } 2407 2408 /* Verify rdev->desc_nr is unique. 
2409 * If it is -1, assign a free number, else 2410 * check number is not in use 2411 */ 2412 rcu_read_lock(); 2413 if (rdev->desc_nr < 0) { 2414 int choice = 0; 2415 if (mddev->pers) 2416 choice = mddev->raid_disks; 2417 while (md_find_rdev_nr_rcu(mddev, choice)) 2418 choice++; 2419 rdev->desc_nr = choice; 2420 } else { 2421 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2422 rcu_read_unlock(); 2423 return -EBUSY; 2424 } 2425 } 2426 rcu_read_unlock(); 2427 if (!test_bit(Journal, &rdev->flags) && 2428 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2429 pr_warn("md: %s: array is limited to %d devices\n", 2430 mdname(mddev), mddev->max_disks); 2431 return -EBUSY; 2432 } 2433 snprintf(b, sizeof(b), "%pg", rdev->bdev); 2434 strreplace(b, '/', '!'); 2435 2436 rdev->mddev = mddev; 2437 pr_debug("md: bind<%s>\n", b); 2438 2439 if (mddev->raid_disks) 2440 mddev_create_serial_pool(mddev, rdev, false); 2441 2442 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2443 goto fail; 2444 2445 /* failure here is OK */ 2446 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block"); 2447 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2448 rdev->sysfs_unack_badblocks = 2449 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks"); 2450 rdev->sysfs_badblocks = 2451 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks"); 2452 2453 list_add_rcu(&rdev->same_set, &mddev->disks); 2454 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2455 2456 /* May as well allow recovery to be retried once */ 2457 mddev->recovery_disabled++; 2458 2459 return 0; 2460 2461 fail: 2462 pr_warn("md: failed to register dev-%s for %s\n", 2463 b, mdname(mddev)); 2464 return err; 2465 } 2466 2467 static void rdev_delayed_delete(struct work_struct *ws) 2468 { 2469 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2470 kobject_del(&rdev->kobj); 2471 kobject_put(&rdev->kobj); 2472 } 2473 2474 static void unbind_rdev_from_array(struct md_rdev *rdev) 2475 { 2476 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2477 list_del_rcu(&rdev->same_set); 2478 pr_debug("md: unbind<%pg>\n", rdev->bdev); 2479 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 2480 rdev->mddev = NULL; 2481 sysfs_remove_link(&rdev->kobj, "block"); 2482 sysfs_put(rdev->sysfs_state); 2483 sysfs_put(rdev->sysfs_unack_badblocks); 2484 sysfs_put(rdev->sysfs_badblocks); 2485 rdev->sysfs_state = NULL; 2486 rdev->sysfs_unack_badblocks = NULL; 2487 rdev->sysfs_badblocks = NULL; 2488 rdev->badblocks.count = 0; 2489 /* We need to delay this, otherwise we can deadlock when 2490 * writing to 'remove' to "dev/state". We also need 2491 * to delay it due to rcu usage. 2492 */ 2493 synchronize_rcu(); 2494 INIT_WORK(&rdev->del_work, rdev_delayed_delete); 2495 kobject_get(&rdev->kobj); 2496 queue_work(md_rdev_misc_wq, &rdev->del_work); 2497 } 2498 2499 /* 2500 * prevent the device from being mounted, repartitioned or 2501 * otherwise reused by a RAID array (or any other kernel 2502 * subsystem), by bd_claiming the device. 2503 */ 2504 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2505 { 2506 int err = 0; 2507 struct block_device *bdev; 2508 2509 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2510 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2511 if (IS_ERR(bdev)) { 2512 pr_warn("md: could not open device unknown-block(%u,%u).\n", 2513 MAJOR(dev), MINOR(dev)); 2514 return PTR_ERR(bdev); 2515 } 2516 rdev->bdev = bdev; 2517 return err; 2518 } 2519 2520 static void unlock_rdev(struct md_rdev *rdev) 2521 { 2522 struct block_device *bdev = rdev->bdev; 2523 rdev->bdev = NULL; 2524 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2525 } 2526 2527 void md_autodetect_dev(dev_t dev); 2528 2529 static void export_rdev(struct md_rdev *rdev) 2530 { 2531 pr_debug("md: export_rdev(%pg)\n", rdev->bdev); 2532 md_rdev_clear(rdev); 2533 #ifndef MODULE 2534 if (test_bit(AutoDetected, &rdev->flags)) 2535 md_autodetect_dev(rdev->bdev->bd_dev); 2536 #endif 2537 unlock_rdev(rdev); 2538 kobject_put(&rdev->kobj); 2539 } 2540 2541 void md_kick_rdev_from_array(struct md_rdev *rdev) 2542 { 2543 unbind_rdev_from_array(rdev); 2544 export_rdev(rdev); 2545 } 2546 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2547 2548 static void export_array(struct mddev *mddev) 2549 { 2550 struct md_rdev *rdev; 2551 2552 while (!list_empty(&mddev->disks)) { 2553 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2554 same_set); 2555 md_kick_rdev_from_array(rdev); 2556 } 2557 mddev->raid_disks = 0; 2558 mddev->major_version = 0; 2559 } 2560 2561 static bool set_in_sync(struct mddev *mddev) 2562 { 2563 lockdep_assert_held(&mddev->lock); 2564 if (!mddev->in_sync) { 2565 mddev->sync_checkers++; 2566 spin_unlock(&mddev->lock); 2567 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2568 spin_lock(&mddev->lock); 2569 if (!mddev->in_sync && 2570 percpu_ref_is_zero(&mddev->writes_pending)) { 2571 mddev->in_sync = 1; 2572 /* 2573 * Ensure ->in_sync is visible before we clear 2574 * ->sync_checkers. 2575 */ 2576 smp_mb(); 2577 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2578 sysfs_notify_dirent_safe(mddev->sysfs_state); 2579 } 2580 if (--mddev->sync_checkers == 0) 2581 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2582 } 2583 if (mddev->safemode == 1) 2584 mddev->safemode = 0; 2585 return mddev->in_sync; 2586 } 2587 2588 static void sync_sbs(struct mddev *mddev, int nospares) 2589 { 2590 /* Update each superblock (in-memory image), but 2591 * if we are allowed to, skip spares which already 2592 * have the right event counter, or have one earlier 2593 * (which would mean they aren't being marked as dirty 2594 * with the rest of the array) 2595 */ 2596 struct md_rdev *rdev; 2597 rdev_for_each(rdev, mddev) { 2598 if (rdev->sb_events == mddev->events || 2599 (nospares && 2600 rdev->raid_disk < 0 && 2601 rdev->sb_events+1 == mddev->events)) { 2602 /* Don't update this superblock */ 2603 rdev->sb_loaded = 2; 2604 } else { 2605 sync_super(mddev, rdev); 2606 rdev->sb_loaded = 1; 2607 } 2608 } 2609 } 2610 2611 static bool does_sb_need_changing(struct mddev *mddev) 2612 { 2613 struct md_rdev *rdev = NULL, *iter; 2614 struct mdp_superblock_1 *sb; 2615 int role; 2616 2617 /* Find a good rdev */ 2618 rdev_for_each(iter, mddev) 2619 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { 2620 rdev = iter; 2621 break; 2622 } 2623 2624 /* No good device found. */ 2625 if (!rdev) 2626 return false; 2627 2628 sb = page_address(rdev->sb_page); 2629 /* Check if a device has become faulty or a spare become active */ 2630 rdev_for_each(rdev, mddev) { 2631 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2632 /* Device activated? 
*/ 2633 if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 && 2634 !test_bit(Faulty, &rdev->flags)) 2635 return true; 2636 /* Device turned faulty? */ 2637 if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX)) 2638 return true; 2639 } 2640 2641 /* Check if any mddev parameters have changed */ 2642 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2643 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2644 (mddev->layout != le32_to_cpu(sb->layout)) || 2645 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2646 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2647 return true; 2648 2649 return false; 2650 } 2651 2652 void md_update_sb(struct mddev *mddev, int force_change) 2653 { 2654 struct md_rdev *rdev; 2655 int sync_req; 2656 int nospares = 0; 2657 int any_badblocks_changed = 0; 2658 int ret = -1; 2659 2660 if (mddev->ro) { 2661 if (force_change) 2662 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2663 return; 2664 } 2665 2666 repeat: 2667 if (mddev_is_clustered(mddev)) { 2668 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2669 force_change = 1; 2670 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2671 nospares = 1; 2672 ret = md_cluster_ops->metadata_update_start(mddev); 2673 /* Has someone else updated the sb? */ 2674 if (!does_sb_need_changing(mddev)) { 2675 if (ret == 0) 2676 md_cluster_ops->metadata_update_cancel(mddev); 2677 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2678 BIT(MD_SB_CHANGE_DEVS) | 2679 BIT(MD_SB_CHANGE_CLEAN)); 2680 return; 2681 } 2682 } 2683 2684 /* 2685 * First make sure individual recovery_offsets are correct. 2686 * curr_resync_completed can only be used during recovery. 2687 * During reshape/resync it might use array-addresses rather 2688 * than device addresses. 2689 */ 2690 rdev_for_each(rdev, mddev) { 2691 if (rdev->raid_disk >= 0 && 2692 mddev->delta_disks >= 0 && 2693 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 2694 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && 2695 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2696 !test_bit(Journal, &rdev->flags) && 2697 !test_bit(In_sync, &rdev->flags) && 2698 mddev->curr_resync_completed > rdev->recovery_offset) 2699 rdev->recovery_offset = mddev->curr_resync_completed; 2700 2701 } 2702 if (!mddev->persistent) { 2703 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2704 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2705 if (!mddev->external) { 2706 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 2707 rdev_for_each(rdev, mddev) { 2708 if (rdev->badblocks.changed) { 2709 rdev->badblocks.changed = 0; 2710 ack_all_badblocks(&rdev->badblocks); 2711 md_error(mddev, rdev); 2712 } 2713 clear_bit(Blocked, &rdev->flags); 2714 clear_bit(BlockedBadBlocks, &rdev->flags); 2715 wake_up(&rdev->blocked_wait); 2716 } 2717 } 2718 wake_up(&mddev->sb_wait); 2719 return; 2720 } 2721 2722 spin_lock(&mddev->lock); 2723 2724 mddev->utime = ktime_get_real_seconds(); 2725 2726 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2727 force_change = 1; 2728 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2729 /* just a clean <-> dirty transition, possibly leave spares alone, 2730 * though if events isn't the right even/odd, we will have to do 2731 * spares after all 2732 */ 2733 nospares = 1; 2734 if (force_change) 2735 nospares = 0; 2736 if (mddev->degraded) 2737 /* If the array is degraded, then skipping spares is both 2738 * dangerous and fairly pointless.
2739 * Dangerous because a device that was removed from the array 2740 * might have a event_count that still looks up-to-date, 2741 * so it can be re-added without a resync. 2742 * Pointless because if there are any spares to skip, 2743 * then a recovery will happen and soon that array won't 2744 * be degraded any more and the spare can go back to sleep then. 2745 */ 2746 nospares = 0; 2747 2748 sync_req = mddev->in_sync; 2749 2750 /* If this is just a dirty<->clean transition, and the array is clean 2751 * and 'events' is odd, we can roll back to the previous clean state */ 2752 if (nospares 2753 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2754 && mddev->can_decrease_events 2755 && mddev->events != 1) { 2756 mddev->events--; 2757 mddev->can_decrease_events = 0; 2758 } else { 2759 /* otherwise we have to go forward and ... */ 2760 mddev->events ++; 2761 mddev->can_decrease_events = nospares; 2762 } 2763 2764 /* 2765 * This 64-bit counter should never wrap. 2766 * Either we are in around ~1 trillion A.C., assuming 2767 * 1 reboot per second, or we have a bug... 2768 */ 2769 WARN_ON(mddev->events == 0); 2770 2771 rdev_for_each(rdev, mddev) { 2772 if (rdev->badblocks.changed) 2773 any_badblocks_changed++; 2774 if (test_bit(Faulty, &rdev->flags)) 2775 set_bit(FaultRecorded, &rdev->flags); 2776 } 2777 2778 sync_sbs(mddev, nospares); 2779 spin_unlock(&mddev->lock); 2780 2781 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2782 mdname(mddev), mddev->in_sync); 2783 2784 if (mddev->queue) 2785 blk_add_trace_msg(mddev->queue, "md md_update_sb"); 2786 rewrite: 2787 md_bitmap_update_sb(mddev->bitmap); 2788 rdev_for_each(rdev, mddev) { 2789 if (rdev->sb_loaded != 1) 2790 continue; /* no noise on spare devices */ 2791 2792 if (!test_bit(Faulty, &rdev->flags)) { 2793 md_super_write(mddev,rdev, 2794 rdev->sb_start, rdev->sb_size, 2795 rdev->sb_page); 2796 pr_debug("md: (write) %pg's sb offset: %llu\n", 2797 rdev->bdev, 2798 (unsigned long long)rdev->sb_start); 2799 rdev->sb_events = mddev->events; 2800 if (rdev->badblocks.size) { 2801 md_super_write(mddev, rdev, 2802 rdev->badblocks.sector, 2803 rdev->badblocks.size << 9, 2804 rdev->bb_page); 2805 rdev->badblocks.size = 0; 2806 } 2807 2808 } else 2809 pr_debug("md: %pg (skipping faulty)\n", 2810 rdev->bdev); 2811 2812 if (mddev->level == LEVEL_MULTIPATH) 2813 /* only need to write one superblock... 
*/ 2814 break; 2815 } 2816 if (md_super_wait(mddev) < 0) 2817 goto rewrite; 2818 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ 2819 2820 if (mddev_is_clustered(mddev) && ret == 0) 2821 md_cluster_ops->metadata_update_finish(mddev); 2822 2823 if (mddev->in_sync != sync_req || 2824 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2825 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) 2826 /* have to write it out again */ 2827 goto repeat; 2828 wake_up(&mddev->sb_wait); 2829 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2830 sysfs_notify_dirent_safe(mddev->sysfs_completed); 2831 2832 rdev_for_each(rdev, mddev) { 2833 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2834 clear_bit(Blocked, &rdev->flags); 2835 2836 if (any_badblocks_changed) 2837 ack_all_badblocks(&rdev->badblocks); 2838 clear_bit(BlockedBadBlocks, &rdev->flags); 2839 wake_up(&rdev->blocked_wait); 2840 } 2841 } 2842 EXPORT_SYMBOL(md_update_sb); 2843 2844 static int add_bound_rdev(struct md_rdev *rdev) 2845 { 2846 struct mddev *mddev = rdev->mddev; 2847 int err = 0; 2848 bool add_journal = test_bit(Journal, &rdev->flags); 2849 2850 if (!mddev->pers->hot_remove_disk || add_journal) { 2851 /* If there is hot_add_disk but no hot_remove_disk 2852 * then added disks for geometry changes, 2853 * and should be added immediately. 2854 */ 2855 super_types[mddev->major_version]. 2856 validate_super(mddev, rdev); 2857 if (add_journal) 2858 mddev_suspend(mddev); 2859 err = mddev->pers->hot_add_disk(mddev, rdev); 2860 if (add_journal) 2861 mddev_resume(mddev); 2862 if (err) { 2863 md_kick_rdev_from_array(rdev); 2864 return err; 2865 } 2866 } 2867 sysfs_notify_dirent_safe(rdev->sysfs_state); 2868 2869 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2870 if (mddev->degraded) 2871 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2872 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2873 md_new_event(); 2874 md_wakeup_thread(mddev->thread); 2875 return 0; 2876 } 2877 2878 /* words written to sysfs files may, or may not, be \n terminated. 2879 * We want to accept with case. For this we use cmd_match. 2880 */ 2881 static int cmd_match(const char *cmd, const char *str) 2882 { 2883 /* See if cmd, written into a sysfs file, matches 2884 * str. 
They must either be the same, or cmd can 2885 * have a trailing newline 2886 */ 2887 while (*cmd && *str && *cmd == *str) { 2888 cmd++; 2889 str++; 2890 } 2891 if (*cmd == '\n') 2892 cmd++; 2893 if (*str || *cmd) 2894 return 0; 2895 return 1; 2896 } 2897 2898 struct rdev_sysfs_entry { 2899 struct attribute attr; 2900 ssize_t (*show)(struct md_rdev *, char *); 2901 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2902 }; 2903 2904 static ssize_t 2905 state_show(struct md_rdev *rdev, char *page) 2906 { 2907 char *sep = ","; 2908 size_t len = 0; 2909 unsigned long flags = READ_ONCE(rdev->flags); 2910 2911 if (test_bit(Faulty, &flags) || 2912 (!test_bit(ExternalBbl, &flags) && 2913 rdev->badblocks.unacked_exist)) 2914 len += sprintf(page+len, "faulty%s", sep); 2915 if (test_bit(In_sync, &flags)) 2916 len += sprintf(page+len, "in_sync%s", sep); 2917 if (test_bit(Journal, &flags)) 2918 len += sprintf(page+len, "journal%s", sep); 2919 if (test_bit(WriteMostly, &flags)) 2920 len += sprintf(page+len, "write_mostly%s", sep); 2921 if (test_bit(Blocked, &flags) || 2922 (rdev->badblocks.unacked_exist 2923 && !test_bit(Faulty, &flags))) 2924 len += sprintf(page+len, "blocked%s", sep); 2925 if (!test_bit(Faulty, &flags) && 2926 !test_bit(Journal, &flags) && 2927 !test_bit(In_sync, &flags)) 2928 len += sprintf(page+len, "spare%s", sep); 2929 if (test_bit(WriteErrorSeen, &flags)) 2930 len += sprintf(page+len, "write_error%s", sep); 2931 if (test_bit(WantReplacement, &flags)) 2932 len += sprintf(page+len, "want_replacement%s", sep); 2933 if (test_bit(Replacement, &flags)) 2934 len += sprintf(page+len, "replacement%s", sep); 2935 if (test_bit(ExternalBbl, &flags)) 2936 len += sprintf(page+len, "external_bbl%s", sep); 2937 if (test_bit(FailFast, &flags)) 2938 len += sprintf(page+len, "failfast%s", sep); 2939 2940 if (len) 2941 len -= strlen(sep); 2942 2943 return len+sprintf(page+len, "\n"); 2944 } 2945 2946 static ssize_t 2947 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2948 { 2949 /* can write 2950 * faulty - simulates an error 2951 * remove - disconnects the device 2952 * writemostly - sets write_mostly 2953 * -writemostly - clears write_mostly 2954 * blocked - sets the Blocked flags 2955 * -blocked - clears the Blocked and possibly simulates an error 2956 * insync - sets Insync providing device isn't active 2957 * -insync - clear Insync for a device with a slot assigned, 2958 * so that it gets rebuilt based on bitmap 2959 * write_error - sets WriteErrorSeen 2960 * -write_error - clears WriteErrorSeen 2961 * {,-}failfast - set/clear FailFast 2962 */ 2963 2964 struct mddev *mddev = rdev->mddev; 2965 int err = -EINVAL; 2966 bool need_update_sb = false; 2967 2968 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2969 md_error(rdev->mddev, rdev); 2970 2971 if (test_bit(MD_BROKEN, &rdev->mddev->flags)) 2972 err = -EBUSY; 2973 else 2974 err = 0; 2975 } else if (cmd_match(buf, "remove")) { 2976 if (rdev->mddev->pers) { 2977 clear_bit(Blocked, &rdev->flags); 2978 remove_and_add_spares(rdev->mddev, rdev); 2979 } 2980 if (rdev->raid_disk >= 0) 2981 err = -EBUSY; 2982 else { 2983 err = 0; 2984 if (mddev_is_clustered(mddev)) 2985 err = md_cluster_ops->remove_disk(mddev, rdev); 2986 2987 if (err == 0) { 2988 md_kick_rdev_from_array(rdev); 2989 if (mddev->pers) { 2990 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2991 md_wakeup_thread(mddev->thread); 2992 } 2993 md_new_event(); 2994 } 2995 } 2996 } else if (cmd_match(buf, "writemostly")) { 2997 set_bit(WriteMostly, &rdev->flags); 2998 
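/* Illustrative note (not part of the original source; device and array names
 * are assumptions): these keywords arrive through the per-rdev sysfs 'state'
 * attribute, so on a typical system something like
 *   echo writemostly > /sys/block/md0/md/dev-sda1/state
 * takes this branch, sets WriteMostly and allocates the serial pool below,
 * while "echo -writemostly" into the same file undoes it.
 */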
mddev_create_serial_pool(rdev->mddev, rdev, false); 2999 need_update_sb = true; 3000 err = 0; 3001 } else if (cmd_match(buf, "-writemostly")) { 3002 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 3003 clear_bit(WriteMostly, &rdev->flags); 3004 need_update_sb = true; 3005 err = 0; 3006 } else if (cmd_match(buf, "blocked")) { 3007 set_bit(Blocked, &rdev->flags); 3008 err = 0; 3009 } else if (cmd_match(buf, "-blocked")) { 3010 if (!test_bit(Faulty, &rdev->flags) && 3011 !test_bit(ExternalBbl, &rdev->flags) && 3012 rdev->badblocks.unacked_exist) { 3013 /* metadata handler doesn't understand badblocks, 3014 * so we need to fail the device 3015 */ 3016 md_error(rdev->mddev, rdev); 3017 } 3018 clear_bit(Blocked, &rdev->flags); 3019 clear_bit(BlockedBadBlocks, &rdev->flags); 3020 wake_up(&rdev->blocked_wait); 3021 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3022 md_wakeup_thread(rdev->mddev->thread); 3023 3024 err = 0; 3025 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 3026 set_bit(In_sync, &rdev->flags); 3027 err = 0; 3028 } else if (cmd_match(buf, "failfast")) { 3029 set_bit(FailFast, &rdev->flags); 3030 need_update_sb = true; 3031 err = 0; 3032 } else if (cmd_match(buf, "-failfast")) { 3033 clear_bit(FailFast, &rdev->flags); 3034 need_update_sb = true; 3035 err = 0; 3036 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 3037 !test_bit(Journal, &rdev->flags)) { 3038 if (rdev->mddev->pers == NULL) { 3039 clear_bit(In_sync, &rdev->flags); 3040 rdev->saved_raid_disk = rdev->raid_disk; 3041 rdev->raid_disk = -1; 3042 err = 0; 3043 } 3044 } else if (cmd_match(buf, "write_error")) { 3045 set_bit(WriteErrorSeen, &rdev->flags); 3046 err = 0; 3047 } else if (cmd_match(buf, "-write_error")) { 3048 clear_bit(WriteErrorSeen, &rdev->flags); 3049 err = 0; 3050 } else if (cmd_match(buf, "want_replacement")) { 3051 /* Any non-spare device that is not a replacement can 3052 * become want_replacement at any time, but we then need to 3053 * check if recovery is needed. 3054 */ 3055 if (rdev->raid_disk >= 0 && 3056 !test_bit(Journal, &rdev->flags) && 3057 !test_bit(Replacement, &rdev->flags)) 3058 set_bit(WantReplacement, &rdev->flags); 3059 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3060 md_wakeup_thread(rdev->mddev->thread); 3061 err = 0; 3062 } else if (cmd_match(buf, "-want_replacement")) { 3063 /* Clearing 'want_replacement' is always allowed. 3064 * Once replacements starts it is too late though. 3065 */ 3066 err = 0; 3067 clear_bit(WantReplacement, &rdev->flags); 3068 } else if (cmd_match(buf, "replacement")) { 3069 /* Can only set a device as a replacement when array has not 3070 * yet been started. Once running, replacement is automatic 3071 * from spares, or by assigning 'slot'. 3072 */ 3073 if (rdev->mddev->pers) 3074 err = -EBUSY; 3075 else { 3076 set_bit(Replacement, &rdev->flags); 3077 err = 0; 3078 } 3079 } else if (cmd_match(buf, "-replacement")) { 3080 /* Similarly, can only clear Replacement before start */ 3081 if (rdev->mddev->pers) 3082 err = -EBUSY; 3083 else { 3084 clear_bit(Replacement, &rdev->flags); 3085 err = 0; 3086 } 3087 } else if (cmd_match(buf, "re-add")) { 3088 if (!rdev->mddev->pers) 3089 err = -EINVAL; 3090 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 3091 rdev->saved_raid_disk >= 0) { 3092 /* clear_bit is performed _after_ all the devices 3093 * have their local Faulty bit cleared. 
If any writes 3094 * happen in the meantime in the local node, they 3095 * will land in the local bitmap, which will be synced 3096 * by this node eventually 3097 */ 3098 if (!mddev_is_clustered(rdev->mddev) || 3099 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 3100 clear_bit(Faulty, &rdev->flags); 3101 err = add_bound_rdev(rdev); 3102 } 3103 } else 3104 err = -EBUSY; 3105 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 3106 set_bit(ExternalBbl, &rdev->flags); 3107 rdev->badblocks.shift = 0; 3108 err = 0; 3109 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 3110 clear_bit(ExternalBbl, &rdev->flags); 3111 err = 0; 3112 } 3113 if (need_update_sb) 3114 md_update_sb(mddev, 1); 3115 if (!err) 3116 sysfs_notify_dirent_safe(rdev->sysfs_state); 3117 return err ? err : len; 3118 } 3119 static struct rdev_sysfs_entry rdev_state = 3120 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 3121 3122 static ssize_t 3123 errors_show(struct md_rdev *rdev, char *page) 3124 { 3125 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 3126 } 3127 3128 static ssize_t 3129 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 3130 { 3131 unsigned int n; 3132 int rv; 3133 3134 rv = kstrtouint(buf, 10, &n); 3135 if (rv < 0) 3136 return rv; 3137 atomic_set(&rdev->corrected_errors, n); 3138 return len; 3139 } 3140 static struct rdev_sysfs_entry rdev_errors = 3141 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 3142 3143 static ssize_t 3144 slot_show(struct md_rdev *rdev, char *page) 3145 { 3146 if (test_bit(Journal, &rdev->flags)) 3147 return sprintf(page, "journal\n"); 3148 else if (rdev->raid_disk < 0) 3149 return sprintf(page, "none\n"); 3150 else 3151 return sprintf(page, "%d\n", rdev->raid_disk); 3152 } 3153 3154 static ssize_t 3155 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 3156 { 3157 int slot; 3158 int err; 3159 3160 if (test_bit(Journal, &rdev->flags)) 3161 return -EBUSY; 3162 if (strncmp(buf, "none", 4)==0) 3163 slot = -1; 3164 else { 3165 err = kstrtouint(buf, 10, (unsigned int *)&slot); 3166 if (err < 0) 3167 return err; 3168 } 3169 if (rdev->mddev->pers && slot == -1) { 3170 /* Setting 'slot' on an active array requires also 3171 * updating the 'rd%d' link, and communicating 3172 * with the personality with ->hot_*_disk. 3173 * For now we only support removing 3174 * failed/spare devices. This normally happens automatically, 3175 * but not when the metadata is externally managed. 3176 */ 3177 if (rdev->raid_disk == -1) 3178 return -EEXIST; 3179 /* personality does all needed checks */ 3180 if (rdev->mddev->pers->hot_remove_disk == NULL) 3181 return -EINVAL; 3182 clear_bit(Blocked, &rdev->flags); 3183 remove_and_add_spares(rdev->mddev, rdev); 3184 if (rdev->raid_disk >= 0) 3185 return -EBUSY; 3186 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3187 md_wakeup_thread(rdev->mddev->thread); 3188 } else if (rdev->mddev->pers) { 3189 /* Activating a spare .. or possibly reactivating 3190 * if we ever get bitmaps working here. 
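	 * Illustrative example (array and device names are made up): with the
	 * array running, "echo 2 > /sys/block/md0/md/dev-sdc1/slot" reaches
	 * this branch, assigns raid_disk 2 and asks the personality to
	 * hot-add the device.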
3191 */ 3192 int err; 3193 3194 if (rdev->raid_disk != -1) 3195 return -EBUSY; 3196 3197 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 3198 return -EBUSY; 3199 3200 if (rdev->mddev->pers->hot_add_disk == NULL) 3201 return -EINVAL; 3202 3203 if (slot >= rdev->mddev->raid_disks && 3204 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3205 return -ENOSPC; 3206 3207 rdev->raid_disk = slot; 3208 if (test_bit(In_sync, &rdev->flags)) 3209 rdev->saved_raid_disk = slot; 3210 else 3211 rdev->saved_raid_disk = -1; 3212 clear_bit(In_sync, &rdev->flags); 3213 clear_bit(Bitmap_sync, &rdev->flags); 3214 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); 3215 if (err) { 3216 rdev->raid_disk = -1; 3217 return err; 3218 } else 3219 sysfs_notify_dirent_safe(rdev->sysfs_state); 3220 /* failure here is OK */; 3221 sysfs_link_rdev(rdev->mddev, rdev); 3222 /* don't wakeup anyone, leave that to userspace. */ 3223 } else { 3224 if (slot >= rdev->mddev->raid_disks && 3225 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3226 return -ENOSPC; 3227 rdev->raid_disk = slot; 3228 /* assume it is working */ 3229 clear_bit(Faulty, &rdev->flags); 3230 clear_bit(WriteMostly, &rdev->flags); 3231 set_bit(In_sync, &rdev->flags); 3232 sysfs_notify_dirent_safe(rdev->sysfs_state); 3233 } 3234 return len; 3235 } 3236 3237 static struct rdev_sysfs_entry rdev_slot = 3238 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3239 3240 static ssize_t 3241 offset_show(struct md_rdev *rdev, char *page) 3242 { 3243 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3244 } 3245 3246 static ssize_t 3247 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3248 { 3249 unsigned long long offset; 3250 if (kstrtoull(buf, 10, &offset) < 0) 3251 return -EINVAL; 3252 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3253 return -EBUSY; 3254 if (rdev->sectors && rdev->mddev->external) 3255 /* Must set offset before size, so overlap checks 3256 * can be sane */ 3257 return -EBUSY; 3258 rdev->data_offset = offset; 3259 rdev->new_data_offset = offset; 3260 return len; 3261 } 3262 3263 static struct rdev_sysfs_entry rdev_offset = 3264 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3265 3266 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3267 { 3268 return sprintf(page, "%llu\n", 3269 (unsigned long long)rdev->new_data_offset); 3270 } 3271 3272 static ssize_t new_offset_store(struct md_rdev *rdev, 3273 const char *buf, size_t len) 3274 { 3275 unsigned long long new_offset; 3276 struct mddev *mddev = rdev->mddev; 3277 3278 if (kstrtoull(buf, 10, &new_offset) < 0) 3279 return -EINVAL; 3280 3281 if (mddev->sync_thread || 3282 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 3283 return -EBUSY; 3284 if (new_offset == rdev->data_offset) 3285 /* reset is always permitted */ 3286 ; 3287 else if (new_offset > rdev->data_offset) { 3288 /* must not push array size beyond rdev_sectors */ 3289 if (new_offset - rdev->data_offset 3290 + mddev->dev_sectors > rdev->sectors) 3291 return -E2BIG; 3292 } 3293 /* Metadata worries about other space details. */ 3294 3295 /* decreasing the offset is inconsistent with a backwards 3296 * reshape. 3297 */ 3298 if (new_offset < rdev->data_offset && 3299 mddev->reshape_backwards) 3300 return -EINVAL; 3301 /* Increasing offset is inconsistent with forwards 3302 * reshape. reshape_direction should be set to 3303 * 'backwards' first. 
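	 * Illustrative numbers (not from the source): with data_offset at
	 * 2048 sectors, writing 4096 to 'new_offset' is rejected until
	 * reshape_direction is 'backwards', while writing 1024 is rejected
	 * once it is.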
3304 */ 3305 if (new_offset > rdev->data_offset && 3306 !mddev->reshape_backwards) 3307 return -EINVAL; 3308 3309 if (mddev->pers && mddev->persistent && 3310 !super_types[mddev->major_version] 3311 .allow_new_offset(rdev, new_offset)) 3312 return -E2BIG; 3313 rdev->new_data_offset = new_offset; 3314 if (new_offset > rdev->data_offset) 3315 mddev->reshape_backwards = 1; 3316 else if (new_offset < rdev->data_offset) 3317 mddev->reshape_backwards = 0; 3318 3319 return len; 3320 } 3321 static struct rdev_sysfs_entry rdev_new_offset = 3322 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 3323 3324 static ssize_t 3325 rdev_size_show(struct md_rdev *rdev, char *page) 3326 { 3327 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 3328 } 3329 3330 static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b) 3331 { 3332 /* check if two start/length pairs overlap */ 3333 if (a->data_offset + a->sectors <= b->data_offset) 3334 return false; 3335 if (b->data_offset + b->sectors <= a->data_offset) 3336 return false; 3337 return true; 3338 } 3339 3340 static bool md_rdev_overlaps(struct md_rdev *rdev) 3341 { 3342 struct mddev *mddev; 3343 struct md_rdev *rdev2; 3344 3345 spin_lock(&all_mddevs_lock); 3346 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { 3347 if (test_bit(MD_DELETED, &mddev->flags)) 3348 continue; 3349 rdev_for_each(rdev2, mddev) { 3350 if (rdev != rdev2 && rdev->bdev == rdev2->bdev && 3351 md_rdevs_overlap(rdev, rdev2)) { 3352 spin_unlock(&all_mddevs_lock); 3353 return true; 3354 } 3355 } 3356 } 3357 spin_unlock(&all_mddevs_lock); 3358 return false; 3359 } 3360 3361 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 3362 { 3363 unsigned long long blocks; 3364 sector_t new; 3365 3366 if (kstrtoull(buf, 10, &blocks) < 0) 3367 return -EINVAL; 3368 3369 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 3370 return -EINVAL; /* sector conversion overflow */ 3371 3372 new = blocks * 2; 3373 if (new != blocks * 2) 3374 return -EINVAL; /* unsigned long long to sector_t overflow */ 3375 3376 *sectors = new; 3377 return 0; 3378 } 3379 3380 static ssize_t 3381 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3382 { 3383 struct mddev *my_mddev = rdev->mddev; 3384 sector_t oldsectors = rdev->sectors; 3385 sector_t sectors; 3386 3387 if (test_bit(Journal, &rdev->flags)) 3388 return -EBUSY; 3389 if (strict_blocks_to_sectors(buf, &sectors) < 0) 3390 return -EINVAL; 3391 if (rdev->data_offset != rdev->new_data_offset) 3392 return -EINVAL; /* too confusing */ 3393 if (my_mddev->pers && rdev->raid_disk >= 0) { 3394 if (my_mddev->persistent) { 3395 sectors = super_types[my_mddev->major_version]. 3396 rdev_size_change(rdev, sectors); 3397 if (!sectors) 3398 return -EBUSY; 3399 } else if (!sectors) 3400 sectors = bdev_nr_sectors(rdev->bdev) - 3401 rdev->data_offset; 3402 if (!my_mddev->pers->resize) 3403 /* Cannot change size for RAID0 or Linear etc */ 3404 return -EINVAL; 3405 } 3406 if (sectors < my_mddev->dev_sectors) 3407 return -EINVAL; /* component must fit device */ 3408 3409 rdev->sectors = sectors; 3410 3411 /* 3412 * Check that all other rdevs with the same bdev do not overlap. This 3413 * check does not provide a hard guarantee, it just helps avoid 3414 * dangerous mistakes. 3415 */ 3416 if (sectors > oldsectors && my_mddev->external && 3417 md_rdev_overlaps(rdev)) { 3418 /* 3419 * Someone else could have slipped in a size change here, but 3420 * doing so is just silly.
We put oldsectors back because we 3421 * know it is safe, and trust userspace not to race with itself. 3422 */ 3423 rdev->sectors = oldsectors; 3424 return -EBUSY; 3425 } 3426 return len; 3427 } 3428 3429 static struct rdev_sysfs_entry rdev_size = 3430 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 3431 3432 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 3433 { 3434 unsigned long long recovery_start = rdev->recovery_offset; 3435 3436 if (test_bit(In_sync, &rdev->flags) || 3437 recovery_start == MaxSector) 3438 return sprintf(page, "none\n"); 3439 3440 return sprintf(page, "%llu\n", recovery_start); 3441 } 3442 3443 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 3444 { 3445 unsigned long long recovery_start; 3446 3447 if (cmd_match(buf, "none")) 3448 recovery_start = MaxSector; 3449 else if (kstrtoull(buf, 10, &recovery_start)) 3450 return -EINVAL; 3451 3452 if (rdev->mddev->pers && 3453 rdev->raid_disk >= 0) 3454 return -EBUSY; 3455 3456 rdev->recovery_offset = recovery_start; 3457 if (recovery_start == MaxSector) 3458 set_bit(In_sync, &rdev->flags); 3459 else 3460 clear_bit(In_sync, &rdev->flags); 3461 return len; 3462 } 3463 3464 static struct rdev_sysfs_entry rdev_recovery_start = 3465 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 3466 3467 /* sysfs access to bad-blocks list. 3468 * We present two files. 3469 * 'bad-blocks' lists sector numbers and lengths of ranges that 3470 * are recorded as bad. The list is truncated to fit within 3471 * the one-page limit of sysfs. 3472 * Writing "sector length" to this file adds an acknowledged 3473 * bad block list. 3474 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet 3475 * been acknowledged. Writing to this file adds bad blocks 3476 * without acknowledging them. This is largely for testing. 
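 * A minimal illustration (sector and length values are made up): writing
 * "2097152 8" to 'bad_blocks' records an acknowledged 8-sector bad range
 * starting at sector 2097152, while the same string written to
 * 'unacknowledged_bad_blocks' records it without acknowledgement.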
3477 */ 3478 static ssize_t bb_show(struct md_rdev *rdev, char *page) 3479 { 3480 return badblocks_show(&rdev->badblocks, page, 0); 3481 } 3482 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 3483 { 3484 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 3485 /* Maybe that ack was all we needed */ 3486 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 3487 wake_up(&rdev->blocked_wait); 3488 return rv; 3489 } 3490 static struct rdev_sysfs_entry rdev_bad_blocks = 3491 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 3492 3493 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 3494 { 3495 return badblocks_show(&rdev->badblocks, page, 1); 3496 } 3497 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 3498 { 3499 return badblocks_store(&rdev->badblocks, page, len, 1); 3500 } 3501 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 3502 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 3503 3504 static ssize_t 3505 ppl_sector_show(struct md_rdev *rdev, char *page) 3506 { 3507 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector); 3508 } 3509 3510 static ssize_t 3511 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len) 3512 { 3513 unsigned long long sector; 3514 3515 if (kstrtoull(buf, 10, &sector) < 0) 3516 return -EINVAL; 3517 if (sector != (sector_t)sector) 3518 return -EINVAL; 3519 3520 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3521 rdev->raid_disk >= 0) 3522 return -EBUSY; 3523 3524 if (rdev->mddev->persistent) { 3525 if (rdev->mddev->major_version == 0) 3526 return -EINVAL; 3527 if ((sector > rdev->sb_start && 3528 sector - rdev->sb_start > S16_MAX) || 3529 (sector < rdev->sb_start && 3530 rdev->sb_start - sector > -S16_MIN)) 3531 return -EINVAL; 3532 rdev->ppl.offset = sector - rdev->sb_start; 3533 } else if (!rdev->mddev->external) { 3534 return -EBUSY; 3535 } 3536 rdev->ppl.sector = sector; 3537 return len; 3538 } 3539 3540 static struct rdev_sysfs_entry rdev_ppl_sector = 3541 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store); 3542 3543 static ssize_t 3544 ppl_size_show(struct md_rdev *rdev, char *page) 3545 { 3546 return sprintf(page, "%u\n", rdev->ppl.size); 3547 } 3548 3549 static ssize_t 3550 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3551 { 3552 unsigned int size; 3553 3554 if (kstrtouint(buf, 10, &size) < 0) 3555 return -EINVAL; 3556 3557 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3558 rdev->raid_disk >= 0) 3559 return -EBUSY; 3560 3561 if (rdev->mddev->persistent) { 3562 if (rdev->mddev->major_version == 0) 3563 return -EINVAL; 3564 if (size > U16_MAX) 3565 return -EINVAL; 3566 } else if (!rdev->mddev->external) { 3567 return -EBUSY; 3568 } 3569 rdev->ppl.size = size; 3570 return len; 3571 } 3572 3573 static struct rdev_sysfs_entry rdev_ppl_size = 3574 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store); 3575 3576 static struct attribute *rdev_default_attrs[] = { 3577 &rdev_state.attr, 3578 &rdev_errors.attr, 3579 &rdev_slot.attr, 3580 &rdev_offset.attr, 3581 &rdev_new_offset.attr, 3582 &rdev_size.attr, 3583 &rdev_recovery_start.attr, 3584 &rdev_bad_blocks.attr, 3585 &rdev_unack_bad_blocks.attr, 3586 &rdev_ppl_sector.attr, 3587 &rdev_ppl_size.attr, 3588 NULL, 3589 }; 3590 ATTRIBUTE_GROUPS(rdev_default); 3591 static ssize_t 3592 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3593 { 3594 struct rdev_sysfs_entry
*entry = container_of(attr, struct rdev_sysfs_entry, attr); 3595 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3596 3597 if (!entry->show) 3598 return -EIO; 3599 if (!rdev->mddev) 3600 return -ENODEV; 3601 return entry->show(rdev, page); 3602 } 3603 3604 static ssize_t 3605 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3606 const char *page, size_t length) 3607 { 3608 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3609 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3610 ssize_t rv; 3611 struct mddev *mddev = rdev->mddev; 3612 3613 if (!entry->store) 3614 return -EIO; 3615 if (!capable(CAP_SYS_ADMIN)) 3616 return -EACCES; 3617 rv = mddev ? mddev_lock(mddev) : -ENODEV; 3618 if (!rv) { 3619 if (rdev->mddev == NULL) 3620 rv = -ENODEV; 3621 else 3622 rv = entry->store(rdev, page, length); 3623 mddev_unlock(mddev); 3624 } 3625 return rv; 3626 } 3627 3628 static void rdev_free(struct kobject *ko) 3629 { 3630 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3631 kfree(rdev); 3632 } 3633 static const struct sysfs_ops rdev_sysfs_ops = { 3634 .show = rdev_attr_show, 3635 .store = rdev_attr_store, 3636 }; 3637 static struct kobj_type rdev_ktype = { 3638 .release = rdev_free, 3639 .sysfs_ops = &rdev_sysfs_ops, 3640 .default_groups = rdev_default_groups, 3641 }; 3642 3643 int md_rdev_init(struct md_rdev *rdev) 3644 { 3645 rdev->desc_nr = -1; 3646 rdev->saved_raid_disk = -1; 3647 rdev->raid_disk = -1; 3648 rdev->flags = 0; 3649 rdev->data_offset = 0; 3650 rdev->new_data_offset = 0; 3651 rdev->sb_events = 0; 3652 rdev->last_read_error = 0; 3653 rdev->sb_loaded = 0; 3654 rdev->bb_page = NULL; 3655 atomic_set(&rdev->nr_pending, 0); 3656 atomic_set(&rdev->read_errors, 0); 3657 atomic_set(&rdev->corrected_errors, 0); 3658 3659 INIT_LIST_HEAD(&rdev->same_set); 3660 init_waitqueue_head(&rdev->blocked_wait); 3661 3662 /* Add space to store bad block list. 3663 * This reserves the space even on arrays where it cannot 3664 * be used - I wonder if that matters 3665 */ 3666 return badblocks_init(&rdev->badblocks, 0); 3667 } 3668 EXPORT_SYMBOL_GPL(md_rdev_init); 3669 /* 3670 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3671 * 3672 * mark the device faulty if: 3673 * 3674 * - the device is nonexistent (zero size) 3675 * - the device has no valid superblock 3676 * 3677 * a faulty rdev _never_ has rdev->sb set. 3678 */ 3679 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3680 { 3681 int err; 3682 struct md_rdev *rdev; 3683 sector_t size; 3684 3685 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3686 if (!rdev) 3687 return ERR_PTR(-ENOMEM); 3688 3689 err = md_rdev_init(rdev); 3690 if (err) 3691 goto abort_free; 3692 err = alloc_disk_sb(rdev); 3693 if (err) 3694 goto abort_free; 3695 3696 err = lock_rdev(rdev, newdev, super_format == -2); 3697 if (err) 3698 goto abort_free; 3699 3700 kobject_init(&rdev->kobj, &rdev_ktype); 3701 3702 size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS; 3703 if (!size) { 3704 pr_warn("md: %pg has zero or unknown size, marking faulty!\n", 3705 rdev->bdev); 3706 err = -EINVAL; 3707 goto abort_free; 3708 } 3709 3710 if (super_format >= 0) { 3711 err = super_types[super_format]. 
3712 load_super(rdev, NULL, super_minor); 3713 if (err == -EINVAL) { 3714 pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n", 3715 rdev->bdev, 3716 super_format, super_minor); 3717 goto abort_free; 3718 } 3719 if (err < 0) { 3720 pr_warn("md: could not read %pg's sb, not importing!\n", 3721 rdev->bdev); 3722 goto abort_free; 3723 } 3724 } 3725 3726 return rdev; 3727 3728 abort_free: 3729 if (rdev->bdev) 3730 unlock_rdev(rdev); 3731 md_rdev_clear(rdev); 3732 kfree(rdev); 3733 return ERR_PTR(err); 3734 } 3735 3736 /* 3737 * Check a full RAID array for plausibility 3738 */ 3739 3740 static int analyze_sbs(struct mddev *mddev) 3741 { 3742 int i; 3743 struct md_rdev *rdev, *freshest, *tmp; 3744 3745 freshest = NULL; 3746 rdev_for_each_safe(rdev, tmp, mddev) 3747 switch (super_types[mddev->major_version]. 3748 load_super(rdev, freshest, mddev->minor_version)) { 3749 case 1: 3750 freshest = rdev; 3751 break; 3752 case 0: 3753 break; 3754 default: 3755 pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n", 3756 rdev->bdev); 3757 md_kick_rdev_from_array(rdev); 3758 } 3759 3760 /* Cannot find a valid fresh disk */ 3761 if (!freshest) { 3762 pr_warn("md: cannot find a valid disk\n"); 3763 return -EINVAL; 3764 } 3765 3766 super_types[mddev->major_version]. 3767 validate_super(mddev, freshest); 3768 3769 i = 0; 3770 rdev_for_each_safe(rdev, tmp, mddev) { 3771 if (mddev->max_disks && 3772 (rdev->desc_nr >= mddev->max_disks || 3773 i > mddev->max_disks)) { 3774 pr_warn("md: %s: %pg: only %d devices permitted\n", 3775 mdname(mddev), rdev->bdev, 3776 mddev->max_disks); 3777 md_kick_rdev_from_array(rdev); 3778 continue; 3779 } 3780 if (rdev != freshest) { 3781 if (super_types[mddev->major_version]. 3782 validate_super(mddev, rdev)) { 3783 pr_warn("md: kicking non-fresh %pg from array!\n", 3784 rdev->bdev); 3785 md_kick_rdev_from_array(rdev); 3786 continue; 3787 } 3788 } 3789 if (mddev->level == LEVEL_MULTIPATH) { 3790 rdev->desc_nr = i++; 3791 rdev->raid_disk = rdev->desc_nr; 3792 set_bit(In_sync, &rdev->flags); 3793 } else if (rdev->raid_disk >= 3794 (mddev->raid_disks - min(0, mddev->delta_disks)) && 3795 !test_bit(Journal, &rdev->flags)) { 3796 rdev->raid_disk = -1; 3797 clear_bit(In_sync, &rdev->flags); 3798 } 3799 } 3800 3801 return 0; 3802 } 3803 3804 /* Read a fixed-point number. 3805 * Numbers in sysfs attributes should be in "standard" units where 3806 * possible, so time should be in seconds. 3807 * However we internally use a much smaller unit such as 3808 * milliseconds or jiffies. 3809 * This function takes a decimal number with a possible fractional 3810 * component, and produces an integer which is the result of 3811 * multiplying that number by 10^'scale', 3812 * all without any floating-point arithmetic. 3813 */ 3814 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 3815 { 3816 unsigned long result = 0; 3817 long decimals = -1; 3818 while (isdigit(*cp) || (*cp == '.'
&& decimals < 0)) { 3819 if (*cp == '.') 3820 decimals = 0; 3821 else if (decimals < scale) { 3822 unsigned int value; 3823 value = *cp - '0'; 3824 result = result * 10 + value; 3825 if (decimals >= 0) 3826 decimals++; 3827 } 3828 cp++; 3829 } 3830 if (*cp == '\n') 3831 cp++; 3832 if (*cp) 3833 return -EINVAL; 3834 if (decimals < 0) 3835 decimals = 0; 3836 *res = result * int_pow(10, scale - decimals); 3837 return 0; 3838 } 3839 3840 static ssize_t 3841 safe_delay_show(struct mddev *mddev, char *page) 3842 { 3843 int msec = (mddev->safemode_delay*1000)/HZ; 3844 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3845 } 3846 static ssize_t 3847 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3848 { 3849 unsigned long msec; 3850 3851 if (mddev_is_clustered(mddev)) { 3852 pr_warn("md: Safemode is disabled for clustered mode\n"); 3853 return -EINVAL; 3854 } 3855 3856 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3857 return -EINVAL; 3858 if (msec == 0) 3859 mddev->safemode_delay = 0; 3860 else { 3861 unsigned long old_delay = mddev->safemode_delay; 3862 unsigned long new_delay = (msec*HZ)/1000; 3863 3864 if (new_delay == 0) 3865 new_delay = 1; 3866 mddev->safemode_delay = new_delay; 3867 if (new_delay < old_delay || old_delay == 0) 3868 mod_timer(&mddev->safemode_timer, jiffies+1); 3869 } 3870 return len; 3871 } 3872 static struct md_sysfs_entry md_safe_delay = 3873 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3874 3875 static ssize_t 3876 level_show(struct mddev *mddev, char *page) 3877 { 3878 struct md_personality *p; 3879 int ret; 3880 spin_lock(&mddev->lock); 3881 p = mddev->pers; 3882 if (p) 3883 ret = sprintf(page, "%s\n", p->name); 3884 else if (mddev->clevel[0]) 3885 ret = sprintf(page, "%s\n", mddev->clevel); 3886 else if (mddev->level != LEVEL_NONE) 3887 ret = sprintf(page, "%d\n", mddev->level); 3888 else 3889 ret = 0; 3890 spin_unlock(&mddev->lock); 3891 return ret; 3892 } 3893 3894 static ssize_t 3895 level_store(struct mddev *mddev, const char *buf, size_t len) 3896 { 3897 char clevel[16]; 3898 ssize_t rv; 3899 size_t slen = len; 3900 struct md_personality *pers, *oldpers; 3901 long level; 3902 void *priv, *oldpriv; 3903 struct md_rdev *rdev; 3904 3905 if (slen == 0 || slen >= sizeof(clevel)) 3906 return -EINVAL; 3907 3908 rv = mddev_lock(mddev); 3909 if (rv) 3910 return rv; 3911 3912 if (mddev->pers == NULL) { 3913 strncpy(mddev->clevel, buf, slen); 3914 if (mddev->clevel[slen-1] == '\n') 3915 slen--; 3916 mddev->clevel[slen] = 0; 3917 mddev->level = LEVEL_NONE; 3918 rv = len; 3919 goto out_unlock; 3920 } 3921 rv = -EROFS; 3922 if (mddev->ro) 3923 goto out_unlock; 3924 3925 /* request to change the personality. Need to ensure: 3926 * - array is not engaged in resync/recovery/reshape 3927 * - old personality can be suspended 3928 * - new personality will access other array. 
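*
* (Illustrative note, not part of the original comment: this is the
* handler for /sys/block/mdX/md/level, so e.g. "echo raid6 > level" on a
* running raid5 array loads the new personality module if necessary and
* asks the new personality's ->takeover() to adopt the existing array;
* the checks below reject the change while a resync or reshape is in
* progress.)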
3929 */ 3930 3931 rv = -EBUSY; 3932 if (mddev->sync_thread || 3933 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3934 mddev->reshape_position != MaxSector || 3935 mddev->sysfs_active) 3936 goto out_unlock; 3937 3938 rv = -EINVAL; 3939 if (!mddev->pers->quiesce) { 3940 pr_warn("md: %s: %s does not support online personality change\n", 3941 mdname(mddev), mddev->pers->name); 3942 goto out_unlock; 3943 } 3944 3945 /* Now find the new personality */ 3946 strncpy(clevel, buf, slen); 3947 if (clevel[slen-1] == '\n') 3948 slen--; 3949 clevel[slen] = 0; 3950 if (kstrtol(clevel, 10, &level)) 3951 level = LEVEL_NONE; 3952 3953 if (request_module("md-%s", clevel) != 0) 3954 request_module("md-level-%s", clevel); 3955 spin_lock(&pers_lock); 3956 pers = find_pers(level, clevel); 3957 if (!pers || !try_module_get(pers->owner)) { 3958 spin_unlock(&pers_lock); 3959 pr_warn("md: personality %s not loaded\n", clevel); 3960 rv = -EINVAL; 3961 goto out_unlock; 3962 } 3963 spin_unlock(&pers_lock); 3964 3965 if (pers == mddev->pers) { 3966 /* Nothing to do! */ 3967 module_put(pers->owner); 3968 rv = len; 3969 goto out_unlock; 3970 } 3971 if (!pers->takeover) { 3972 module_put(pers->owner); 3973 pr_warn("md: %s: %s does not support personality takeover\n", 3974 mdname(mddev), clevel); 3975 rv = -EINVAL; 3976 goto out_unlock; 3977 } 3978 3979 rdev_for_each(rdev, mddev) 3980 rdev->new_raid_disk = rdev->raid_disk; 3981 3982 /* ->takeover must set new_* and/or delta_disks 3983 * if it succeeds, and may set them when it fails. 3984 */ 3985 priv = pers->takeover(mddev); 3986 if (IS_ERR(priv)) { 3987 mddev->new_level = mddev->level; 3988 mddev->new_layout = mddev->layout; 3989 mddev->new_chunk_sectors = mddev->chunk_sectors; 3990 mddev->raid_disks -= mddev->delta_disks; 3991 mddev->delta_disks = 0; 3992 mddev->reshape_backwards = 0; 3993 module_put(pers->owner); 3994 pr_warn("md: %s: %s would not accept array\n", 3995 mdname(mddev), clevel); 3996 rv = PTR_ERR(priv); 3997 goto out_unlock; 3998 } 3999 4000 /* Looks like we have a winner */ 4001 mddev_suspend(mddev); 4002 mddev_detach(mddev); 4003 4004 spin_lock(&mddev->lock); 4005 oldpers = mddev->pers; 4006 oldpriv = mddev->private; 4007 mddev->pers = pers; 4008 mddev->private = priv; 4009 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4010 mddev->level = mddev->new_level; 4011 mddev->layout = mddev->new_layout; 4012 mddev->chunk_sectors = mddev->new_chunk_sectors; 4013 mddev->delta_disks = 0; 4014 mddev->reshape_backwards = 0; 4015 mddev->degraded = 0; 4016 spin_unlock(&mddev->lock); 4017 4018 if (oldpers->sync_request == NULL && 4019 mddev->external) { 4020 /* We are converting from a no-redundancy array 4021 * to a redundancy array and metadata is managed 4022 * externally so we need to be sure that writes 4023 * won't block due to a need to transition 4024 * clean->dirty 4025 * until external management is started. 
4026 */ 4027 mddev->in_sync = 0; 4028 mddev->safemode_delay = 0; 4029 mddev->safemode = 0; 4030 } 4031 4032 oldpers->free(mddev, oldpriv); 4033 4034 if (oldpers->sync_request == NULL && 4035 pers->sync_request != NULL) { 4036 /* need to add the md_redundancy_group */ 4037 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4038 pr_warn("md: cannot register extra attributes for %s\n", 4039 mdname(mddev)); 4040 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 4041 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 4042 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 4043 } 4044 if (oldpers->sync_request != NULL && 4045 pers->sync_request == NULL) { 4046 /* need to remove the md_redundancy_group */ 4047 if (mddev->to_remove == NULL) 4048 mddev->to_remove = &md_redundancy_group; 4049 } 4050 4051 module_put(oldpers->owner); 4052 4053 rdev_for_each(rdev, mddev) { 4054 if (rdev->raid_disk < 0) 4055 continue; 4056 if (rdev->new_raid_disk >= mddev->raid_disks) 4057 rdev->new_raid_disk = -1; 4058 if (rdev->new_raid_disk == rdev->raid_disk) 4059 continue; 4060 sysfs_unlink_rdev(mddev, rdev); 4061 } 4062 rdev_for_each(rdev, mddev) { 4063 if (rdev->raid_disk < 0) 4064 continue; 4065 if (rdev->new_raid_disk == rdev->raid_disk) 4066 continue; 4067 rdev->raid_disk = rdev->new_raid_disk; 4068 if (rdev->raid_disk < 0) 4069 clear_bit(In_sync, &rdev->flags); 4070 else { 4071 if (sysfs_link_rdev(mddev, rdev)) 4072 pr_warn("md: cannot register rd%d for %s after level change\n", 4073 rdev->raid_disk, mdname(mddev)); 4074 } 4075 } 4076 4077 if (pers->sync_request == NULL) { 4078 /* this is now an array without redundancy, so 4079 * it must always be in_sync 4080 */ 4081 mddev->in_sync = 1; 4082 del_timer_sync(&mddev->safemode_timer); 4083 } 4084 blk_set_stacking_limits(&mddev->queue->limits); 4085 pers->run(mddev); 4086 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4087 mddev_resume(mddev); 4088 if (!mddev->thread) 4089 md_update_sb(mddev, 1); 4090 sysfs_notify_dirent_safe(mddev->sysfs_level); 4091 md_new_event(); 4092 rv = len; 4093 out_unlock: 4094 mddev_unlock(mddev); 4095 return rv; 4096 } 4097 4098 static struct md_sysfs_entry md_level = 4099 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 4100 4101 static ssize_t 4102 layout_show(struct mddev *mddev, char *page) 4103 { 4104 /* just a number, not meaningful for all levels */ 4105 if (mddev->reshape_position != MaxSector && 4106 mddev->layout != mddev->new_layout) 4107 return sprintf(page, "%d (%d)\n", 4108 mddev->new_layout, mddev->layout); 4109 return sprintf(page, "%d\n", mddev->layout); 4110 } 4111 4112 static ssize_t 4113 layout_store(struct mddev *mddev, const char *buf, size_t len) 4114 { 4115 unsigned int n; 4116 int err; 4117 4118 err = kstrtouint(buf, 10, &n); 4119 if (err < 0) 4120 return err; 4121 err = mddev_lock(mddev); 4122 if (err) 4123 return err; 4124 4125 if (mddev->pers) { 4126 if (mddev->pers->check_reshape == NULL) 4127 err = -EBUSY; 4128 else if (mddev->ro) 4129 err = -EROFS; 4130 else { 4131 mddev->new_layout = n; 4132 err = mddev->pers->check_reshape(mddev); 4133 if (err) 4134 mddev->new_layout = mddev->layout; 4135 } 4136 } else { 4137 mddev->new_layout = n; 4138 if (mddev->reshape_position == MaxSector) 4139 mddev->layout = n; 4140 } 4141 mddev_unlock(mddev); 4142 return err ?: len; 4143 } 4144 static struct md_sysfs_entry md_layout = 4145 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 4146 4147 static ssize_t 4148 
raid_disks_show(struct mddev *mddev, char *page) 4149 { 4150 if (mddev->raid_disks == 0) 4151 return 0; 4152 if (mddev->reshape_position != MaxSector && 4153 mddev->delta_disks != 0) 4154 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 4155 mddev->raid_disks - mddev->delta_disks); 4156 return sprintf(page, "%d\n", mddev->raid_disks); 4157 } 4158 4159 static int update_raid_disks(struct mddev *mddev, int raid_disks); 4160 4161 static ssize_t 4162 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 4163 { 4164 unsigned int n; 4165 int err; 4166 4167 err = kstrtouint(buf, 10, &n); 4168 if (err < 0) 4169 return err; 4170 4171 err = mddev_lock(mddev); 4172 if (err) 4173 return err; 4174 if (mddev->pers) 4175 err = update_raid_disks(mddev, n); 4176 else if (mddev->reshape_position != MaxSector) { 4177 struct md_rdev *rdev; 4178 int olddisks = mddev->raid_disks - mddev->delta_disks; 4179 4180 err = -EINVAL; 4181 rdev_for_each(rdev, mddev) { 4182 if (olddisks < n && 4183 rdev->data_offset < rdev->new_data_offset) 4184 goto out_unlock; 4185 if (olddisks > n && 4186 rdev->data_offset > rdev->new_data_offset) 4187 goto out_unlock; 4188 } 4189 err = 0; 4190 mddev->delta_disks = n - olddisks; 4191 mddev->raid_disks = n; 4192 mddev->reshape_backwards = (mddev->delta_disks < 0); 4193 } else 4194 mddev->raid_disks = n; 4195 out_unlock: 4196 mddev_unlock(mddev); 4197 return err ? err : len; 4198 } 4199 static struct md_sysfs_entry md_raid_disks = 4200 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 4201 4202 static ssize_t 4203 uuid_show(struct mddev *mddev, char *page) 4204 { 4205 return sprintf(page, "%pU\n", mddev->uuid); 4206 } 4207 static struct md_sysfs_entry md_uuid = 4208 __ATTR(uuid, S_IRUGO, uuid_show, NULL); 4209 4210 static ssize_t 4211 chunk_size_show(struct mddev *mddev, char *page) 4212 { 4213 if (mddev->reshape_position != MaxSector && 4214 mddev->chunk_sectors != mddev->new_chunk_sectors) 4215 return sprintf(page, "%d (%d)\n", 4216 mddev->new_chunk_sectors << 9, 4217 mddev->chunk_sectors << 9); 4218 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 4219 } 4220 4221 static ssize_t 4222 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 4223 { 4224 unsigned long n; 4225 int err; 4226 4227 err = kstrtoul(buf, 10, &n); 4228 if (err < 0) 4229 return err; 4230 4231 err = mddev_lock(mddev); 4232 if (err) 4233 return err; 4234 if (mddev->pers) { 4235 if (mddev->pers->check_reshape == NULL) 4236 err = -EBUSY; 4237 else if (mddev->ro) 4238 err = -EROFS; 4239 else { 4240 mddev->new_chunk_sectors = n >> 9; 4241 err = mddev->pers->check_reshape(mddev); 4242 if (err) 4243 mddev->new_chunk_sectors = mddev->chunk_sectors; 4244 } 4245 } else { 4246 mddev->new_chunk_sectors = n >> 9; 4247 if (mddev->reshape_position == MaxSector) 4248 mddev->chunk_sectors = n >> 9; 4249 } 4250 mddev_unlock(mddev); 4251 return err ?: len; 4252 } 4253 static struct md_sysfs_entry md_chunk_size = 4254 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 4255 4256 static ssize_t 4257 resync_start_show(struct mddev *mddev, char *page) 4258 { 4259 if (mddev->recovery_cp == MaxSector) 4260 return sprintf(page, "none\n"); 4261 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 4262 } 4263 4264 static ssize_t 4265 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 4266 { 4267 unsigned long long n; 4268 int err; 4269 4270 if (cmd_match(buf, "none")) 4271 n = MaxSector; 4272 else { 4273 err = kstrtoull(buf, 10, 
&n); 4274 if (err < 0) 4275 return err; 4276 if (n != (sector_t)n) 4277 return -EINVAL; 4278 } 4279 4280 err = mddev_lock(mddev); 4281 if (err) 4282 return err; 4283 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4284 err = -EBUSY; 4285 4286 if (!err) { 4287 mddev->recovery_cp = n; 4288 if (mddev->pers) 4289 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4290 } 4291 mddev_unlock(mddev); 4292 return err ?: len; 4293 } 4294 static struct md_sysfs_entry md_resync_start = 4295 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 4296 resync_start_show, resync_start_store); 4297 4298 /* 4299 * The array state can be: 4300 * 4301 * clear 4302 * No devices, no size, no level 4303 * Equivalent to STOP_ARRAY ioctl 4304 * inactive 4305 * May have some settings, but array is not active 4306 * all IO results in error 4307 * When written, doesn't tear down array, but just stops it 4308 * suspended (not supported yet) 4309 * All IO requests will block. The array can be reconfigured. 4310 * Writing this, if accepted, will block until array is quiescent 4311 * readonly 4312 * no resync can happen. no superblocks get written. 4313 * write requests fail 4314 * read-auto 4315 * like readonly, but behaves like 'clean' on a write request. 4316 * 4317 * clean - no pending writes, but otherwise active. 4318 * When written to inactive array, starts without resync 4319 * If a write request arrives then 4320 * if metadata is known, mark 'dirty' and switch to 'active'. 4321 * if not known, block and switch to write-pending 4322 * If written to an active array that has pending writes, then fails. 4323 * active 4324 * fully active: IO and resync can be happening. 4325 * When written to inactive array, starts with resync 4326 * 4327 * write-pending 4328 * clean, but writes are blocked waiting for 'active' to be written. 4329 * 4330 * active-idle 4331 * like active, but no writes have been seen for a while (100msec). 4332 * 4333 * broken 4334 * Array is failed. It's useful because mounted-arrays aren't stopped 4335 * when array is failed, so this state will at least alert the user that 4336 * something is wrong. 
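*
* (Illustrative usage, not part of the original list: these are the
* strings reported by "cat /sys/block/mdX/md/array_state"; most of them
* can also be written back, e.g. "echo readonly > array_state" or
* "echo clear > array_state" to tear the array down, as handled by
* array_state_store() below.)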
4337 */ 4338 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4339 write_pending, active_idle, broken, bad_word}; 4340 static char *array_states[] = { 4341 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4342 "write-pending", "active-idle", "broken", NULL }; 4343 4344 static int match_word(const char *word, char **list) 4345 { 4346 int n; 4347 for (n=0; list[n]; n++) 4348 if (cmd_match(word, list[n])) 4349 break; 4350 return n; 4351 } 4352 4353 static ssize_t 4354 array_state_show(struct mddev *mddev, char *page) 4355 { 4356 enum array_state st = inactive; 4357 4358 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { 4359 switch(mddev->ro) { 4360 case 1: 4361 st = readonly; 4362 break; 4363 case 2: 4364 st = read_auto; 4365 break; 4366 case 0: 4367 spin_lock(&mddev->lock); 4368 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4369 st = write_pending; 4370 else if (mddev->in_sync) 4371 st = clean; 4372 else if (mddev->safemode) 4373 st = active_idle; 4374 else 4375 st = active; 4376 spin_unlock(&mddev->lock); 4377 } 4378 4379 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) 4380 st = broken; 4381 } else { 4382 if (list_empty(&mddev->disks) && 4383 mddev->raid_disks == 0 && 4384 mddev->dev_sectors == 0) 4385 st = clear; 4386 else 4387 st = inactive; 4388 } 4389 return sprintf(page, "%s\n", array_states[st]); 4390 } 4391 4392 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 4393 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 4394 static int restart_array(struct mddev *mddev); 4395 4396 static ssize_t 4397 array_state_store(struct mddev *mddev, const char *buf, size_t len) 4398 { 4399 int err = 0; 4400 enum array_state st = match_word(buf, array_states); 4401 4402 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 4403 /* don't take reconfig_mutex when toggling between 4404 * clean and active 4405 */ 4406 spin_lock(&mddev->lock); 4407 if (st == active) { 4408 restart_array(mddev); 4409 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4410 md_wakeup_thread(mddev->thread); 4411 wake_up(&mddev->sb_wait); 4412 } else /* st == clean */ { 4413 restart_array(mddev); 4414 if (!set_in_sync(mddev)) 4415 err = -EBUSY; 4416 } 4417 if (!err) 4418 sysfs_notify_dirent_safe(mddev->sysfs_state); 4419 spin_unlock(&mddev->lock); 4420 return err ?: len; 4421 } 4422 err = mddev_lock(mddev); 4423 if (err) 4424 return err; 4425 err = -EINVAL; 4426 switch(st) { 4427 case bad_word: 4428 break; 4429 case clear: 4430 /* stopping an active array */ 4431 err = do_md_stop(mddev, 0, NULL); 4432 break; 4433 case inactive: 4434 /* stopping an active array */ 4435 if (mddev->pers) 4436 err = do_md_stop(mddev, 2, NULL); 4437 else 4438 err = 0; /* already inactive */ 4439 break; 4440 case suspended: 4441 break; /* not supported yet */ 4442 case readonly: 4443 if (mddev->pers) 4444 err = md_set_readonly(mddev, NULL); 4445 else { 4446 mddev->ro = 1; 4447 set_disk_ro(mddev->gendisk, 1); 4448 err = do_md_run(mddev); 4449 } 4450 break; 4451 case read_auto: 4452 if (mddev->pers) { 4453 if (mddev->ro == 0) 4454 err = md_set_readonly(mddev, NULL); 4455 else if (mddev->ro == 1) 4456 err = restart_array(mddev); 4457 if (err == 0) { 4458 mddev->ro = 2; 4459 set_disk_ro(mddev->gendisk, 0); 4460 } 4461 } else { 4462 mddev->ro = 2; 4463 err = do_md_run(mddev); 4464 } 4465 break; 4466 case clean: 4467 if (mddev->pers) { 4468 err = restart_array(mddev); 4469 if (err) 4470 break; 4471 
spin_lock(&mddev->lock); 4472 if (!set_in_sync(mddev)) 4473 err = -EBUSY; 4474 spin_unlock(&mddev->lock); 4475 } else 4476 err = -EINVAL; 4477 break; 4478 case active: 4479 if (mddev->pers) { 4480 err = restart_array(mddev); 4481 if (err) 4482 break; 4483 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4484 wake_up(&mddev->sb_wait); 4485 err = 0; 4486 } else { 4487 mddev->ro = 0; 4488 set_disk_ro(mddev->gendisk, 0); 4489 err = do_md_run(mddev); 4490 } 4491 break; 4492 case write_pending: 4493 case active_idle: 4494 case broken: 4495 /* these cannot be set */ 4496 break; 4497 } 4498 4499 if (!err) { 4500 if (mddev->hold_active == UNTIL_IOCTL) 4501 mddev->hold_active = 0; 4502 sysfs_notify_dirent_safe(mddev->sysfs_state); 4503 } 4504 mddev_unlock(mddev); 4505 return err ?: len; 4506 } 4507 static struct md_sysfs_entry md_array_state = 4508 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 4509 4510 static ssize_t 4511 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 4512 return sprintf(page, "%d\n", 4513 atomic_read(&mddev->max_corr_read_errors)); 4514 } 4515 4516 static ssize_t 4517 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 4518 { 4519 unsigned int n; 4520 int rv; 4521 4522 rv = kstrtouint(buf, 10, &n); 4523 if (rv < 0) 4524 return rv; 4525 atomic_set(&mddev->max_corr_read_errors, n); 4526 return len; 4527 } 4528 4529 static struct md_sysfs_entry max_corr_read_errors = 4530 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 4531 max_corrected_read_errors_store); 4532 4533 static ssize_t 4534 null_show(struct mddev *mddev, char *page) 4535 { 4536 return -EINVAL; 4537 } 4538 4539 /* need to ensure rdev_delayed_delete() has completed */ 4540 static void flush_rdev_wq(struct mddev *mddev) 4541 { 4542 struct md_rdev *rdev; 4543 4544 rcu_read_lock(); 4545 rdev_for_each_rcu(rdev, mddev) 4546 if (work_pending(&rdev->del_work)) { 4547 flush_workqueue(md_rdev_misc_wq); 4548 break; 4549 } 4550 rcu_read_unlock(); 4551 } 4552 4553 static ssize_t 4554 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 4555 { 4556 /* buf must be %d:%d\n? giving major and minor numbers */ 4557 /* The new device is added to the array. 4558 * If the array has a persistent superblock, we read the 4559 * superblock to initialise info and check validity. 4560 * Otherwise, only checking done is that in bind_rdev_to_array, 4561 * which mainly checks size. 
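*
* (Illustrative example, not part of the original comment: writing
* "8:16" imports the block device with major 8, minor 16 and binds it
* to this array, so "echo 8:16 > new_dev" is roughly the sysfs
* counterpart of hot-adding a device with mdadm.)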
4562 */ 4563 char *e; 4564 int major = simple_strtoul(buf, &e, 10); 4565 int minor; 4566 dev_t dev; 4567 struct md_rdev *rdev; 4568 int err; 4569 4570 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 4571 return -EINVAL; 4572 minor = simple_strtoul(e+1, &e, 10); 4573 if (*e && *e != '\n') 4574 return -EINVAL; 4575 dev = MKDEV(major, minor); 4576 if (major != MAJOR(dev) || 4577 minor != MINOR(dev)) 4578 return -EOVERFLOW; 4579 4580 flush_rdev_wq(mddev); 4581 err = mddev_lock(mddev); 4582 if (err) 4583 return err; 4584 if (mddev->persistent) { 4585 rdev = md_import_device(dev, mddev->major_version, 4586 mddev->minor_version); 4587 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 4588 struct md_rdev *rdev0 4589 = list_entry(mddev->disks.next, 4590 struct md_rdev, same_set); 4591 err = super_types[mddev->major_version] 4592 .load_super(rdev, rdev0, mddev->minor_version); 4593 if (err < 0) 4594 goto out; 4595 } 4596 } else if (mddev->external) 4597 rdev = md_import_device(dev, -2, -1); 4598 else 4599 rdev = md_import_device(dev, -1, -1); 4600 4601 if (IS_ERR(rdev)) { 4602 mddev_unlock(mddev); 4603 return PTR_ERR(rdev); 4604 } 4605 err = bind_rdev_to_array(rdev, mddev); 4606 out: 4607 if (err) 4608 export_rdev(rdev); 4609 mddev_unlock(mddev); 4610 if (!err) 4611 md_new_event(); 4612 return err ? err : len; 4613 } 4614 4615 static struct md_sysfs_entry md_new_device = 4616 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4617 4618 static ssize_t 4619 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4620 { 4621 char *end; 4622 unsigned long chunk, end_chunk; 4623 int err; 4624 4625 err = mddev_lock(mddev); 4626 if (err) 4627 return err; 4628 if (!mddev->bitmap) 4629 goto out; 4630 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4631 while (*buf) { 4632 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4633 if (buf == end) break; 4634 if (*end == '-') { /* range */ 4635 buf = end + 1; 4636 end_chunk = simple_strtoul(buf, &end, 0); 4637 if (buf == end) break; 4638 } 4639 if (*end && !isspace(*end)) break; 4640 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4641 buf = skip_spaces(end); 4642 } 4643 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4644 out: 4645 mddev_unlock(mddev); 4646 return len; 4647 } 4648 4649 static struct md_sysfs_entry md_bitmap = 4650 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4651 4652 static ssize_t 4653 size_show(struct mddev *mddev, char *page) 4654 { 4655 return sprintf(page, "%llu\n", 4656 (unsigned long long)mddev->dev_sectors / 2); 4657 } 4658 4659 static int update_size(struct mddev *mddev, sector_t num_sectors); 4660 4661 static ssize_t 4662 size_store(struct mddev *mddev, const char *buf, size_t len) 4663 { 4664 /* If array is inactive, we can reduce the component size, but 4665 * not increase it (except from 0). 4666 * If array is active, we can try an on-line resize 4667 */ 4668 sector_t sectors; 4669 int err = strict_blocks_to_sectors(buf, §ors); 4670 4671 if (err < 0) 4672 return err; 4673 err = mddev_lock(mddev); 4674 if (err) 4675 return err; 4676 if (mddev->pers) { 4677 err = update_size(mddev, sectors); 4678 if (err == 0) 4679 md_update_sb(mddev, 1); 4680 } else { 4681 if (mddev->dev_sectors == 0 || 4682 mddev->dev_sectors > sectors) 4683 mddev->dev_sectors = sectors; 4684 else 4685 err = -ENOSPC; 4686 } 4687 mddev_unlock(mddev); 4688 return err ? 
err : len; 4689 } 4690 4691 static struct md_sysfs_entry md_size = 4692 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 4693 4694 /* Metadata version. 4695 * This is one of 4696 * 'none' for arrays with no metadata (good luck...) 4697 * 'external' for arrays with externally managed metadata, 4698 * or N.M for internally known formats 4699 */ 4700 static ssize_t 4701 metadata_show(struct mddev *mddev, char *page) 4702 { 4703 if (mddev->persistent) 4704 return sprintf(page, "%d.%d\n", 4705 mddev->major_version, mddev->minor_version); 4706 else if (mddev->external) 4707 return sprintf(page, "external:%s\n", mddev->metadata_type); 4708 else 4709 return sprintf(page, "none\n"); 4710 } 4711 4712 static ssize_t 4713 metadata_store(struct mddev *mddev, const char *buf, size_t len) 4714 { 4715 int major, minor; 4716 char *e; 4717 int err; 4718 /* Changing the details of 'external' metadata is 4719 * always permitted. Otherwise there must be 4720 * no devices attached to the array. 4721 */ 4722 4723 err = mddev_lock(mddev); 4724 if (err) 4725 return err; 4726 err = -EBUSY; 4727 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4728 ; 4729 else if (!list_empty(&mddev->disks)) 4730 goto out_unlock; 4731 4732 err = 0; 4733 if (cmd_match(buf, "none")) { 4734 mddev->persistent = 0; 4735 mddev->external = 0; 4736 mddev->major_version = 0; 4737 mddev->minor_version = 90; 4738 goto out_unlock; 4739 } 4740 if (strncmp(buf, "external:", 9) == 0) { 4741 size_t namelen = len-9; 4742 if (namelen >= sizeof(mddev->metadata_type)) 4743 namelen = sizeof(mddev->metadata_type)-1; 4744 strncpy(mddev->metadata_type, buf+9, namelen); 4745 mddev->metadata_type[namelen] = 0; 4746 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4747 mddev->metadata_type[--namelen] = 0; 4748 mddev->persistent = 0; 4749 mddev->external = 1; 4750 mddev->major_version = 0; 4751 mddev->minor_version = 90; 4752 goto out_unlock; 4753 } 4754 major = simple_strtoul(buf, &e, 10); 4755 err = -EINVAL; 4756 if (e==buf || *e != '.') 4757 goto out_unlock; 4758 buf = e+1; 4759 minor = simple_strtoul(buf, &e, 10); 4760 if (e==buf || (*e && *e != '\n') ) 4761 goto out_unlock; 4762 err = -ENOENT; 4763 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4764 goto out_unlock; 4765 mddev->major_version = major; 4766 mddev->minor_version = minor; 4767 mddev->persistent = 1; 4768 mddev->external = 0; 4769 err = 0; 4770 out_unlock: 4771 mddev_unlock(mddev); 4772 return err ?: len; 4773 } 4774 4775 static struct md_sysfs_entry md_metadata = 4776 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4777 4778 static ssize_t 4779 action_show(struct mddev *mddev, char *page) 4780 { 4781 char *type = "idle"; 4782 unsigned long recovery = mddev->recovery; 4783 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 4784 type = "frozen"; 4785 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4786 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4787 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4788 type = "reshape"; 4789 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4790 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4791 type = "resync"; 4792 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4793 type = "check"; 4794 else 4795 type = "repair"; 4796 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4797 type = "recover"; 4798 else if (mddev->reshape_position != MaxSector) 4799 type = "reshape"; 4800 } 4801 return sprintf(page, "%s\n", type); 4802 } 4803 4804 static ssize_t 4805 
action_store(struct mddev *mddev, const char *page, size_t len) 4806 { 4807 if (!mddev->pers || !mddev->pers->sync_request) 4808 return -EINVAL; 4809 4810 4811 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4812 if (cmd_match(page, "frozen")) 4813 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4814 else 4815 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4816 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4817 mddev_lock(mddev) == 0) { 4818 if (work_pending(&mddev->del_work)) 4819 flush_workqueue(md_misc_wq); 4820 if (mddev->sync_thread) { 4821 sector_t save_rp = mddev->reshape_position; 4822 4823 mddev_unlock(mddev); 4824 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4825 md_unregister_thread(&mddev->sync_thread); 4826 mddev_lock_nointr(mddev); 4827 /* 4828 * set RECOVERY_INTR again and restore reshape 4829 * position in case others changed them after 4830 * got lock, eg, reshape_position_store and 4831 * md_check_recovery. 4832 */ 4833 mddev->reshape_position = save_rp; 4834 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4835 md_reap_sync_thread(mddev); 4836 } 4837 mddev_unlock(mddev); 4838 } 4839 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4840 return -EBUSY; 4841 else if (cmd_match(page, "resync")) 4842 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4843 else if (cmd_match(page, "recover")) { 4844 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4845 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4846 } else if (cmd_match(page, "reshape")) { 4847 int err; 4848 if (mddev->pers->start_reshape == NULL) 4849 return -EINVAL; 4850 err = mddev_lock(mddev); 4851 if (!err) { 4852 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4853 err = -EBUSY; 4854 else { 4855 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4856 err = mddev->pers->start_reshape(mddev); 4857 } 4858 mddev_unlock(mddev); 4859 } 4860 if (err) 4861 return err; 4862 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 4863 } else { 4864 if (cmd_match(page, "check")) 4865 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4866 else if (!cmd_match(page, "repair")) 4867 return -EINVAL; 4868 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4869 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4870 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4871 } 4872 if (mddev->ro == 2) { 4873 /* A write to sync_action is enough to justify 4874 * canceling read-auto mode 4875 */ 4876 mddev->ro = 0; 4877 md_wakeup_thread(mddev->sync_thread); 4878 } 4879 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4880 md_wakeup_thread(mddev->thread); 4881 sysfs_notify_dirent_safe(mddev->sysfs_action); 4882 return len; 4883 } 4884 4885 static struct md_sysfs_entry md_scan_mode = 4886 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4887 4888 static ssize_t 4889 last_sync_action_show(struct mddev *mddev, char *page) 4890 { 4891 return sprintf(page, "%s\n", mddev->last_sync_action); 4892 } 4893 4894 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4895 4896 static ssize_t 4897 mismatch_cnt_show(struct mddev *mddev, char *page) 4898 { 4899 return sprintf(page, "%llu\n", 4900 (unsigned long long) 4901 atomic64_read(&mddev->resync_mismatches)); 4902 } 4903 4904 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4905 4906 static ssize_t 4907 sync_min_show(struct mddev *mddev, char *page) 4908 { 4909 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4910 mddev->sync_speed_min ? 
"local": "system"); 4911 } 4912 4913 static ssize_t 4914 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4915 { 4916 unsigned int min; 4917 int rv; 4918 4919 if (strncmp(buf, "system", 6)==0) { 4920 min = 0; 4921 } else { 4922 rv = kstrtouint(buf, 10, &min); 4923 if (rv < 0) 4924 return rv; 4925 if (min == 0) 4926 return -EINVAL; 4927 } 4928 mddev->sync_speed_min = min; 4929 return len; 4930 } 4931 4932 static struct md_sysfs_entry md_sync_min = 4933 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4934 4935 static ssize_t 4936 sync_max_show(struct mddev *mddev, char *page) 4937 { 4938 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4939 mddev->sync_speed_max ? "local": "system"); 4940 } 4941 4942 static ssize_t 4943 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4944 { 4945 unsigned int max; 4946 int rv; 4947 4948 if (strncmp(buf, "system", 6)==0) { 4949 max = 0; 4950 } else { 4951 rv = kstrtouint(buf, 10, &max); 4952 if (rv < 0) 4953 return rv; 4954 if (max == 0) 4955 return -EINVAL; 4956 } 4957 mddev->sync_speed_max = max; 4958 return len; 4959 } 4960 4961 static struct md_sysfs_entry md_sync_max = 4962 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4963 4964 static ssize_t 4965 degraded_show(struct mddev *mddev, char *page) 4966 { 4967 return sprintf(page, "%d\n", mddev->degraded); 4968 } 4969 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4970 4971 static ssize_t 4972 sync_force_parallel_show(struct mddev *mddev, char *page) 4973 { 4974 return sprintf(page, "%d\n", mddev->parallel_resync); 4975 } 4976 4977 static ssize_t 4978 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4979 { 4980 long n; 4981 4982 if (kstrtol(buf, 10, &n)) 4983 return -EINVAL; 4984 4985 if (n != 0 && n != 1) 4986 return -EINVAL; 4987 4988 mddev->parallel_resync = n; 4989 4990 if (mddev->sync_thread) 4991 wake_up(&resync_wait); 4992 4993 return len; 4994 } 4995 4996 /* force parallel resync, even with shared block devices */ 4997 static struct md_sysfs_entry md_sync_force_parallel = 4998 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4999 sync_force_parallel_show, sync_force_parallel_store); 5000 5001 static ssize_t 5002 sync_speed_show(struct mddev *mddev, char *page) 5003 { 5004 unsigned long resync, dt, db; 5005 if (mddev->curr_resync == MD_RESYNC_NONE) 5006 return sprintf(page, "none\n"); 5007 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 5008 dt = (jiffies - mddev->resync_mark) / HZ; 5009 if (!dt) dt++; 5010 db = resync - mddev->resync_mark_cnt; 5011 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 5012 } 5013 5014 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 5015 5016 static ssize_t 5017 sync_completed_show(struct mddev *mddev, char *page) 5018 { 5019 unsigned long long max_sectors, resync; 5020 5021 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5022 return sprintf(page, "none\n"); 5023 5024 if (mddev->curr_resync == MD_RESYNC_YIELDED || 5025 mddev->curr_resync == MD_RESYNC_DELAYED) 5026 return sprintf(page, "delayed\n"); 5027 5028 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 5029 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5030 max_sectors = mddev->resync_max_sectors; 5031 else 5032 max_sectors = mddev->dev_sectors; 5033 5034 resync = mddev->curr_resync_completed; 5035 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 5036 } 5037 5038 static struct md_sysfs_entry md_sync_completed = 5039 
__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); 5040 5041 static ssize_t 5042 min_sync_show(struct mddev *mddev, char *page) 5043 { 5044 return sprintf(page, "%llu\n", 5045 (unsigned long long)mddev->resync_min); 5046 } 5047 static ssize_t 5048 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 5049 { 5050 unsigned long long min; 5051 int err; 5052 5053 if (kstrtoull(buf, 10, &min)) 5054 return -EINVAL; 5055 5056 spin_lock(&mddev->lock); 5057 err = -EINVAL; 5058 if (min > mddev->resync_max) 5059 goto out_unlock; 5060 5061 err = -EBUSY; 5062 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5063 goto out_unlock; 5064 5065 /* Round down to multiple of 4K for safety */ 5066 mddev->resync_min = round_down(min, 8); 5067 err = 0; 5068 5069 out_unlock: 5070 spin_unlock(&mddev->lock); 5071 return err ?: len; 5072 } 5073 5074 static struct md_sysfs_entry md_min_sync = 5075 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 5076 5077 static ssize_t 5078 max_sync_show(struct mddev *mddev, char *page) 5079 { 5080 if (mddev->resync_max == MaxSector) 5081 return sprintf(page, "max\n"); 5082 else 5083 return sprintf(page, "%llu\n", 5084 (unsigned long long)mddev->resync_max); 5085 } 5086 static ssize_t 5087 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 5088 { 5089 int err; 5090 spin_lock(&mddev->lock); 5091 if (strncmp(buf, "max", 3) == 0) 5092 mddev->resync_max = MaxSector; 5093 else { 5094 unsigned long long max; 5095 int chunk; 5096 5097 err = -EINVAL; 5098 if (kstrtoull(buf, 10, &max)) 5099 goto out_unlock; 5100 if (max < mddev->resync_min) 5101 goto out_unlock; 5102 5103 err = -EBUSY; 5104 if (max < mddev->resync_max && 5105 mddev->ro == 0 && 5106 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5107 goto out_unlock; 5108 5109 /* Must be a multiple of chunk_size */ 5110 chunk = mddev->chunk_sectors; 5111 if (chunk) { 5112 sector_t temp = max; 5113 5114 err = -EINVAL; 5115 if (sector_div(temp, chunk)) 5116 goto out_unlock; 5117 } 5118 mddev->resync_max = max; 5119 } 5120 wake_up(&mddev->recovery_wait); 5121 err = 0; 5122 out_unlock: 5123 spin_unlock(&mddev->lock); 5124 return err ?: len; 5125 } 5126 5127 static struct md_sysfs_entry md_max_sync = 5128 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 5129 5130 static ssize_t 5131 suspend_lo_show(struct mddev *mddev, char *page) 5132 { 5133 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 5134 } 5135 5136 static ssize_t 5137 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 5138 { 5139 unsigned long long new; 5140 int err; 5141 5142 err = kstrtoull(buf, 10, &new); 5143 if (err < 0) 5144 return err; 5145 if (new != (sector_t)new) 5146 return -EINVAL; 5147 5148 err = mddev_lock(mddev); 5149 if (err) 5150 return err; 5151 err = -EINVAL; 5152 if (mddev->pers == NULL || 5153 mddev->pers->quiesce == NULL) 5154 goto unlock; 5155 mddev_suspend(mddev); 5156 mddev->suspend_lo = new; 5157 mddev_resume(mddev); 5158 5159 err = 0; 5160 unlock: 5161 mddev_unlock(mddev); 5162 return err ?: len; 5163 } 5164 static struct md_sysfs_entry md_suspend_lo = 5165 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 5166 5167 static ssize_t 5168 suspend_hi_show(struct mddev *mddev, char *page) 5169 { 5170 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 5171 } 5172 5173 static ssize_t 5174 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 5175 { 5176 unsigned long long new; 5177 int err; 5178 
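/*
* Illustrative note (not in the original source): as in suspend_lo_store()
* above, the new boundary is applied between mddev_suspend() and
* mddev_resume() below, so requests already in flight are drained before
* it takes effect.
*/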
5179 err = kstrtoull(buf, 10, &new); 5180 if (err < 0) 5181 return err; 5182 if (new != (sector_t)new) 5183 return -EINVAL; 5184 5185 err = mddev_lock(mddev); 5186 if (err) 5187 return err; 5188 err = -EINVAL; 5189 if (mddev->pers == NULL) 5190 goto unlock; 5191 5192 mddev_suspend(mddev); 5193 mddev->suspend_hi = new; 5194 mddev_resume(mddev); 5195 5196 err = 0; 5197 unlock: 5198 mddev_unlock(mddev); 5199 return err ?: len; 5200 } 5201 static struct md_sysfs_entry md_suspend_hi = 5202 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 5203 5204 static ssize_t 5205 reshape_position_show(struct mddev *mddev, char *page) 5206 { 5207 if (mddev->reshape_position != MaxSector) 5208 return sprintf(page, "%llu\n", 5209 (unsigned long long)mddev->reshape_position); 5210 strcpy(page, "none\n"); 5211 return 5; 5212 } 5213 5214 static ssize_t 5215 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 5216 { 5217 struct md_rdev *rdev; 5218 unsigned long long new; 5219 int err; 5220 5221 err = kstrtoull(buf, 10, &new); 5222 if (err < 0) 5223 return err; 5224 if (new != (sector_t)new) 5225 return -EINVAL; 5226 err = mddev_lock(mddev); 5227 if (err) 5228 return err; 5229 err = -EBUSY; 5230 if (mddev->pers) 5231 goto unlock; 5232 mddev->reshape_position = new; 5233 mddev->delta_disks = 0; 5234 mddev->reshape_backwards = 0; 5235 mddev->new_level = mddev->level; 5236 mddev->new_layout = mddev->layout; 5237 mddev->new_chunk_sectors = mddev->chunk_sectors; 5238 rdev_for_each(rdev, mddev) 5239 rdev->new_data_offset = rdev->data_offset; 5240 err = 0; 5241 unlock: 5242 mddev_unlock(mddev); 5243 return err ?: len; 5244 } 5245 5246 static struct md_sysfs_entry md_reshape_position = 5247 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 5248 reshape_position_store); 5249 5250 static ssize_t 5251 reshape_direction_show(struct mddev *mddev, char *page) 5252 { 5253 return sprintf(page, "%s\n", 5254 mddev->reshape_backwards ? 
"backwards" : "forwards"); 5255 } 5256 5257 static ssize_t 5258 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 5259 { 5260 int backwards = 0; 5261 int err; 5262 5263 if (cmd_match(buf, "forwards")) 5264 backwards = 0; 5265 else if (cmd_match(buf, "backwards")) 5266 backwards = 1; 5267 else 5268 return -EINVAL; 5269 if (mddev->reshape_backwards == backwards) 5270 return len; 5271 5272 err = mddev_lock(mddev); 5273 if (err) 5274 return err; 5275 /* check if we are allowed to change */ 5276 if (mddev->delta_disks) 5277 err = -EBUSY; 5278 else if (mddev->persistent && 5279 mddev->major_version == 0) 5280 err = -EINVAL; 5281 else 5282 mddev->reshape_backwards = backwards; 5283 mddev_unlock(mddev); 5284 return err ?: len; 5285 } 5286 5287 static struct md_sysfs_entry md_reshape_direction = 5288 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 5289 reshape_direction_store); 5290 5291 static ssize_t 5292 array_size_show(struct mddev *mddev, char *page) 5293 { 5294 if (mddev->external_size) 5295 return sprintf(page, "%llu\n", 5296 (unsigned long long)mddev->array_sectors/2); 5297 else 5298 return sprintf(page, "default\n"); 5299 } 5300 5301 static ssize_t 5302 array_size_store(struct mddev *mddev, const char *buf, size_t len) 5303 { 5304 sector_t sectors; 5305 int err; 5306 5307 err = mddev_lock(mddev); 5308 if (err) 5309 return err; 5310 5311 /* cluster raid doesn't support change array_sectors */ 5312 if (mddev_is_clustered(mddev)) { 5313 mddev_unlock(mddev); 5314 return -EINVAL; 5315 } 5316 5317 if (strncmp(buf, "default", 7) == 0) { 5318 if (mddev->pers) 5319 sectors = mddev->pers->size(mddev, 0, 0); 5320 else 5321 sectors = mddev->array_sectors; 5322 5323 mddev->external_size = 0; 5324 } else { 5325 if (strict_blocks_to_sectors(buf, §ors) < 0) 5326 err = -EINVAL; 5327 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 5328 err = -E2BIG; 5329 else 5330 mddev->external_size = 1; 5331 } 5332 5333 if (!err) { 5334 mddev->array_sectors = sectors; 5335 if (mddev->pers) 5336 set_capacity_and_notify(mddev->gendisk, 5337 mddev->array_sectors); 5338 } 5339 mddev_unlock(mddev); 5340 return err ?: len; 5341 } 5342 5343 static struct md_sysfs_entry md_array_size = 5344 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 5345 array_size_store); 5346 5347 static ssize_t 5348 consistency_policy_show(struct mddev *mddev, char *page) 5349 { 5350 int ret; 5351 5352 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 5353 ret = sprintf(page, "journal\n"); 5354 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { 5355 ret = sprintf(page, "ppl\n"); 5356 } else if (mddev->bitmap) { 5357 ret = sprintf(page, "bitmap\n"); 5358 } else if (mddev->pers) { 5359 if (mddev->pers->sync_request) 5360 ret = sprintf(page, "resync\n"); 5361 else 5362 ret = sprintf(page, "none\n"); 5363 } else { 5364 ret = sprintf(page, "unknown\n"); 5365 } 5366 5367 return ret; 5368 } 5369 5370 static ssize_t 5371 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) 5372 { 5373 int err = 0; 5374 5375 if (mddev->pers) { 5376 if (mddev->pers->change_consistency_policy) 5377 err = mddev->pers->change_consistency_policy(mddev, buf); 5378 else 5379 err = -EBUSY; 5380 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { 5381 set_bit(MD_HAS_PPL, &mddev->flags); 5382 } else { 5383 err = -EINVAL; 5384 } 5385 5386 return err ? 
err : len; 5387 } 5388 5389 static struct md_sysfs_entry md_consistency_policy = 5390 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show, 5391 consistency_policy_store); 5392 5393 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) 5394 { 5395 return sprintf(page, "%d\n", mddev->fail_last_dev); 5396 } 5397 5398 /* 5399 * Setting fail_last_dev to true allows the last device to be forcibly removed 5400 * from RAID1/RAID10. 5401 */ 5402 static ssize_t 5403 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) 5404 { 5405 int ret; 5406 bool value; 5407 5408 ret = kstrtobool(buf, &value); 5409 if (ret) 5410 return ret; 5411 5412 if (value != mddev->fail_last_dev) 5413 mddev->fail_last_dev = value; 5414 5415 return len; 5416 } 5417 static struct md_sysfs_entry md_fail_last_dev = 5418 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, 5419 fail_last_dev_store); 5420 5421 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) 5422 { 5423 if (mddev->pers == NULL || (mddev->pers->level != 1)) 5424 return sprintf(page, "n/a\n"); 5425 else 5426 return sprintf(page, "%d\n", mddev->serialize_policy); 5427 } 5428 5429 /* 5430 * Setting serialize_policy to true enforces that write IO is not reordered 5431 * for raid1. 5432 */ 5433 static ssize_t 5434 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) 5435 { 5436 int err; 5437 bool value; 5438 5439 err = kstrtobool(buf, &value); 5440 if (err) 5441 return err; 5442 5443 if (value == mddev->serialize_policy) 5444 return len; 5445 5446 err = mddev_lock(mddev); 5447 if (err) 5448 return err; 5449 if (mddev->pers == NULL || (mddev->pers->level != 1)) { 5450 pr_err("md: serialize_policy is only effective for raid1\n"); 5451 err = -EINVAL; 5452 goto unlock; 5453 } 5454 5455 mddev_suspend(mddev); 5456 if (value) 5457 mddev_create_serial_pool(mddev, NULL, true); 5458 else 5459 mddev_destroy_serial_pool(mddev, NULL, true); 5460 mddev->serialize_policy = value; 5461 mddev_resume(mddev); 5462 unlock: 5463 mddev_unlock(mddev); 5464 return err ?: len; 5465 } 5466 5467 static struct md_sysfs_entry md_serialize_policy = 5468 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, 5469 serialize_policy_store); 5470 5471 5472 static struct attribute *md_default_attrs[] = { 5473 &md_level.attr, 5474 &md_layout.attr, 5475 &md_raid_disks.attr, 5476 &md_uuid.attr, 5477 &md_chunk_size.attr, 5478 &md_size.attr, 5479 &md_resync_start.attr, 5480 &md_metadata.attr, 5481 &md_new_device.attr, 5482 &md_safe_delay.attr, 5483 &md_array_state.attr, 5484 &md_reshape_position.attr, 5485 &md_reshape_direction.attr, 5486 &md_array_size.attr, 5487 &max_corr_read_errors.attr, 5488 &md_consistency_policy.attr, 5489 &md_fail_last_dev.attr, 5490 &md_serialize_policy.attr, 5491 NULL, 5492 }; 5493 5494 static const struct attribute_group md_default_group = { 5495 .attrs = md_default_attrs, 5496 }; 5497 5498 static struct attribute *md_redundancy_attrs[] = { 5499 &md_scan_mode.attr, 5500 &md_last_scan_mode.attr, 5501 &md_mismatches.attr, 5502 &md_sync_min.attr, 5503 &md_sync_max.attr, 5504 &md_sync_speed.attr, 5505 &md_sync_force_parallel.attr, 5506 &md_sync_completed.attr, 5507 &md_min_sync.attr, 5508 &md_max_sync.attr, 5509 &md_suspend_lo.attr, 5510 &md_suspend_hi.attr, 5511 &md_bitmap.attr, 5512 &md_degraded.attr, 5513 NULL, 5514 }; 5515 static const struct attribute_group md_redundancy_group = { 5516 .name = NULL, 5517 .attrs = md_redundancy_attrs, 5518 }; 5519 5520 static const struct
attribute_group *md_attr_groups[] = { 5521 &md_default_group, 5522 &md_bitmap_group, 5523 NULL, 5524 }; 5525 5526 static ssize_t 5527 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 5528 { 5529 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5530 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5531 ssize_t rv; 5532 5533 if (!entry->show) 5534 return -EIO; 5535 spin_lock(&all_mddevs_lock); 5536 if (!mddev_get(mddev)) { 5537 spin_unlock(&all_mddevs_lock); 5538 return -EBUSY; 5539 } 5540 spin_unlock(&all_mddevs_lock); 5541 5542 rv = entry->show(mddev, page); 5543 mddev_put(mddev); 5544 return rv; 5545 } 5546 5547 static ssize_t 5548 md_attr_store(struct kobject *kobj, struct attribute *attr, 5549 const char *page, size_t length) 5550 { 5551 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5552 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5553 ssize_t rv; 5554 5555 if (!entry->store) 5556 return -EIO; 5557 if (!capable(CAP_SYS_ADMIN)) 5558 return -EACCES; 5559 spin_lock(&all_mddevs_lock); 5560 if (!mddev_get(mddev)) { 5561 spin_unlock(&all_mddevs_lock); 5562 return -EBUSY; 5563 } 5564 spin_unlock(&all_mddevs_lock); 5565 rv = entry->store(mddev, page, length); 5566 mddev_put(mddev); 5567 return rv; 5568 } 5569 5570 static void md_kobj_release(struct kobject *ko) 5571 { 5572 struct mddev *mddev = container_of(ko, struct mddev, kobj); 5573 5574 if (mddev->sysfs_state) 5575 sysfs_put(mddev->sysfs_state); 5576 if (mddev->sysfs_level) 5577 sysfs_put(mddev->sysfs_level); 5578 5579 del_gendisk(mddev->gendisk); 5580 put_disk(mddev->gendisk); 5581 } 5582 5583 static const struct sysfs_ops md_sysfs_ops = { 5584 .show = md_attr_show, 5585 .store = md_attr_store, 5586 }; 5587 static struct kobj_type md_ktype = { 5588 .release = md_kobj_release, 5589 .sysfs_ops = &md_sysfs_ops, 5590 .default_groups = md_attr_groups, 5591 }; 5592 5593 int mdp_major = 0; 5594 5595 static void mddev_delayed_delete(struct work_struct *ws) 5596 { 5597 struct mddev *mddev = container_of(ws, struct mddev, del_work); 5598 5599 kobject_put(&mddev->kobj); 5600 } 5601 5602 static void no_op(struct percpu_ref *r) {} 5603 5604 int mddev_init_writes_pending(struct mddev *mddev) 5605 { 5606 if (mddev->writes_pending.percpu_count_ptr) 5607 return 0; 5608 if (percpu_ref_init(&mddev->writes_pending, no_op, 5609 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0) 5610 return -ENOMEM; 5611 /* We want to start with the refcount at zero */ 5612 percpu_ref_put(&mddev->writes_pending); 5613 return 0; 5614 } 5615 EXPORT_SYMBOL_GPL(mddev_init_writes_pending); 5616 5617 int md_alloc(dev_t dev, char *name) 5618 { 5619 /* 5620 * If dev is zero, name is the name of a device to allocate with 5621 * an arbitrary minor number. It will be "md_???" 5622 * If dev is non-zero it must be a device number with a MAJOR of 5623 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then 5624 * the device is being created by opening a node in /dev. 5625 * If "name" is not NULL, the device is being created by 5626 * writing to /sys/module/md_mod/parameters/new_array. 5627 */ 5628 static DEFINE_MUTEX(disks_mutex); 5629 struct mddev *mddev; 5630 struct gendisk *disk; 5631 int partitioned; 5632 int shift; 5633 int unit; 5634 int error ; 5635 5636 /* 5637 * Wait for any previous instance of this device to be completely 5638 * removed (mddev_delayed_delete). 
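*
* (Illustrative note, not part of the original comment: mddev_delayed_delete()
* above is queued on md_misc_wq when the last reference to the mddev is
* dropped, so flushing that workqueue below guarantees the previous kobject
* is gone before an array with the same name is re-created.)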
5639 */ 5640 flush_workqueue(md_misc_wq); 5641 5642 mutex_lock(&disks_mutex); 5643 mddev = mddev_alloc(dev); 5644 if (IS_ERR(mddev)) { 5645 error = PTR_ERR(mddev); 5646 goto out_unlock; 5647 } 5648 5649 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 5650 shift = partitioned ? MdpMinorShift : 0; 5651 unit = MINOR(mddev->unit) >> shift; 5652 5653 if (name && !dev) { 5654 /* Need to ensure that 'name' is not a duplicate. 5655 */ 5656 struct mddev *mddev2; 5657 spin_lock(&all_mddevs_lock); 5658 5659 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 5660 if (mddev2->gendisk && 5661 strcmp(mddev2->gendisk->disk_name, name) == 0) { 5662 spin_unlock(&all_mddevs_lock); 5663 error = -EEXIST; 5664 goto out_free_mddev; 5665 } 5666 spin_unlock(&all_mddevs_lock); 5667 } 5668 if (name && dev) 5669 /* 5670 * Creating /dev/mdNNN via "newarray", so adjust hold_active. 5671 */ 5672 mddev->hold_active = UNTIL_STOP; 5673 5674 error = -ENOMEM; 5675 disk = blk_alloc_disk(NUMA_NO_NODE); 5676 if (!disk) 5677 goto out_free_mddev; 5678 5679 disk->major = MAJOR(mddev->unit); 5680 disk->first_minor = unit << shift; 5681 disk->minors = 1 << shift; 5682 if (name) 5683 strcpy(disk->disk_name, name); 5684 else if (partitioned) 5685 sprintf(disk->disk_name, "md_d%d", unit); 5686 else 5687 sprintf(disk->disk_name, "md%d", unit); 5688 disk->fops = &md_fops; 5689 disk->private_data = mddev; 5690 5691 mddev->queue = disk->queue; 5692 blk_set_stacking_limits(&mddev->queue->limits); 5693 blk_queue_write_cache(mddev->queue, true, true); 5694 disk->events |= DISK_EVENT_MEDIA_CHANGE; 5695 mddev->gendisk = disk; 5696 error = add_disk(disk); 5697 if (error) 5698 goto out_put_disk; 5699 5700 kobject_init(&mddev->kobj, &md_ktype); 5701 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); 5702 if (error) { 5703 /* 5704 * The disk is already live at this point. Clear the hold flag 5705 * and let mddev_put take care of the deletion, as it isn't any 5706 * different from a normal close on last release now. 5707 */ 5708 mddev->hold_active = 0; 5709 goto done; 5710 } 5711 5712 kobject_uevent(&mddev->kobj, KOBJ_ADD); 5713 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 5714 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); 5715 5716 done: 5717 mutex_unlock(&disks_mutex); 5718 mddev_put(mddev); 5719 return error; 5720 5721 out_put_disk: 5722 put_disk(disk); 5723 out_free_mddev: 5724 mddev_free(mddev); 5725 out_unlock: 5726 mutex_unlock(&disks_mutex); 5727 return error; 5728 } 5729 5730 static void md_probe(dev_t dev) 5731 { 5732 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512) 5733 return; 5734 if (create_on_open) 5735 md_alloc(dev, NULL); 5736 } 5737 5738 static int add_named_array(const char *val, const struct kernel_param *kp) 5739 { 5740 /* 5741 * val must be "md_*" or "mdNNN". 5742 * For "md_*" we allocate an array with a large free minor number, and 5743 * set the name to val. val must not already be an active name. 5744 * For "mdNNN" we allocate an array with the minor number NNN 5745 * which must not already be in use. 
5746 */ 5747 int len = strlen(val); 5748 char buf[DISK_NAME_LEN]; 5749 unsigned long devnum; 5750 5751 while (len && val[len-1] == '\n') 5752 len--; 5753 if (len >= DISK_NAME_LEN) 5754 return -E2BIG; 5755 strscpy(buf, val, len+1); 5756 if (strncmp(buf, "md_", 3) == 0) 5757 return md_alloc(0, buf); 5758 if (strncmp(buf, "md", 2) == 0 && 5759 isdigit(buf[2]) && 5760 kstrtoul(buf+2, 10, &devnum) == 0 && 5761 devnum <= MINORMASK) 5762 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL); 5763 5764 return -EINVAL; 5765 } 5766 5767 static void md_safemode_timeout(struct timer_list *t) 5768 { 5769 struct mddev *mddev = from_timer(mddev, t, safemode_timer); 5770 5771 mddev->safemode = 1; 5772 if (mddev->external) 5773 sysfs_notify_dirent_safe(mddev->sysfs_state); 5774 5775 md_wakeup_thread(mddev->thread); 5776 } 5777 5778 static int start_dirty_degraded; 5779 5780 int md_run(struct mddev *mddev) 5781 { 5782 int err; 5783 struct md_rdev *rdev; 5784 struct md_personality *pers; 5785 bool nowait = true; 5786 5787 if (list_empty(&mddev->disks)) 5788 /* cannot run an array with no devices.. */ 5789 return -EINVAL; 5790 5791 if (mddev->pers) 5792 return -EBUSY; 5793 /* Cannot run until previous stop completes properly */ 5794 if (mddev->sysfs_active) 5795 return -EBUSY; 5796 5797 /* 5798 * Analyze all RAID superblock(s) 5799 */ 5800 if (!mddev->raid_disks) { 5801 if (!mddev->persistent) 5802 return -EINVAL; 5803 err = analyze_sbs(mddev); 5804 if (err) 5805 return -EINVAL; 5806 } 5807 5808 if (mddev->level != LEVEL_NONE) 5809 request_module("md-level-%d", mddev->level); 5810 else if (mddev->clevel[0]) 5811 request_module("md-%s", mddev->clevel); 5812 5813 /* 5814 * Drop all container device buffers, from now on 5815 * the only valid external interface is through the md 5816 * device. 5817 */ 5818 mddev->has_superblocks = false; 5819 rdev_for_each(rdev, mddev) { 5820 if (test_bit(Faulty, &rdev->flags)) 5821 continue; 5822 sync_blockdev(rdev->bdev); 5823 invalidate_bdev(rdev->bdev); 5824 if (mddev->ro != 1 && rdev_read_only(rdev)) { 5825 mddev->ro = 1; 5826 if (mddev->gendisk) 5827 set_disk_ro(mddev->gendisk, 1); 5828 } 5829 5830 if (rdev->sb_page) 5831 mddev->has_superblocks = true; 5832 5833 /* perform some consistency tests on the device. 5834 * We don't want the data to overlap the metadata, 5835 * Internal Bitmap issues have been handled elsewhere. 
5836 */ 5837 if (rdev->meta_bdev) { 5838 /* Nothing to check */; 5839 } else if (rdev->data_offset < rdev->sb_start) { 5840 if (mddev->dev_sectors && 5841 rdev->data_offset + mddev->dev_sectors 5842 > rdev->sb_start) { 5843 pr_warn("md: %s: data overlaps metadata\n", 5844 mdname(mddev)); 5845 return -EINVAL; 5846 } 5847 } else { 5848 if (rdev->sb_start + rdev->sb_size/512 5849 > rdev->data_offset) { 5850 pr_warn("md: %s: metadata overlaps data\n", 5851 mdname(mddev)); 5852 return -EINVAL; 5853 } 5854 } 5855 sysfs_notify_dirent_safe(rdev->sysfs_state); 5856 nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev)); 5857 } 5858 5859 if (!bioset_initialized(&mddev->bio_set)) { 5860 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5861 if (err) 5862 return err; 5863 } 5864 if (!bioset_initialized(&mddev->sync_set)) { 5865 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5866 if (err) 5867 goto exit_bio_set; 5868 } 5869 5870 spin_lock(&pers_lock); 5871 pers = find_pers(mddev->level, mddev->clevel); 5872 if (!pers || !try_module_get(pers->owner)) { 5873 spin_unlock(&pers_lock); 5874 if (mddev->level != LEVEL_NONE) 5875 pr_warn("md: personality for level %d is not loaded!\n", 5876 mddev->level); 5877 else 5878 pr_warn("md: personality for level %s is not loaded!\n", 5879 mddev->clevel); 5880 err = -EINVAL; 5881 goto abort; 5882 } 5883 spin_unlock(&pers_lock); 5884 if (mddev->level != pers->level) { 5885 mddev->level = pers->level; 5886 mddev->new_level = pers->level; 5887 } 5888 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5889 5890 if (mddev->reshape_position != MaxSector && 5891 pers->start_reshape == NULL) { 5892 /* This personality cannot handle reshaping... */ 5893 module_put(pers->owner); 5894 err = -EINVAL; 5895 goto abort; 5896 } 5897 5898 if (pers->sync_request) { 5899 /* Warn if this is a potentially silly 5900 * configuration. 
5901 */ 5902 struct md_rdev *rdev2; 5903 int warned = 0; 5904 5905 rdev_for_each(rdev, mddev) 5906 rdev_for_each(rdev2, mddev) { 5907 if (rdev < rdev2 && 5908 rdev->bdev->bd_disk == 5909 rdev2->bdev->bd_disk) { 5910 pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n", 5911 mdname(mddev), 5912 rdev->bdev, 5913 rdev2->bdev); 5914 warned = 1; 5915 } 5916 } 5917 5918 if (warned) 5919 pr_warn("True protection against single-disk failure might be compromised.\n"); 5920 } 5921 5922 mddev->recovery = 0; 5923 /* may be over-ridden by personality */ 5924 mddev->resync_max_sectors = mddev->dev_sectors; 5925 5926 mddev->ok_start_degraded = start_dirty_degraded; 5927 5928 if (start_readonly && mddev->ro == 0) 5929 mddev->ro = 2; /* read-only, but switch on first write */ 5930 5931 err = pers->run(mddev); 5932 if (err) 5933 pr_warn("md: pers->run() failed ...\n"); 5934 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5935 WARN_ONCE(!mddev->external_size, 5936 "%s: default size too small, but 'external_size' not in effect?\n", 5937 __func__); 5938 pr_warn("md: invalid array_size %llu > default size %llu\n", 5939 (unsigned long long)mddev->array_sectors / 2, 5940 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5941 err = -EINVAL; 5942 } 5943 if (err == 0 && pers->sync_request && 5944 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5945 struct bitmap *bitmap; 5946 5947 bitmap = md_bitmap_create(mddev, -1); 5948 if (IS_ERR(bitmap)) { 5949 err = PTR_ERR(bitmap); 5950 pr_warn("%s: failed to create bitmap (%d)\n", 5951 mdname(mddev), err); 5952 } else 5953 mddev->bitmap = bitmap; 5954 5955 } 5956 if (err) 5957 goto bitmap_abort; 5958 5959 if (mddev->bitmap_info.max_write_behind > 0) { 5960 bool create_pool = false; 5961 5962 rdev_for_each(rdev, mddev) { 5963 if (test_bit(WriteMostly, &rdev->flags) && 5964 rdev_init_serial(rdev)) 5965 create_pool = true; 5966 } 5967 if (create_pool && mddev->serial_info_pool == NULL) { 5968 mddev->serial_info_pool = 5969 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 5970 sizeof(struct serial_info)); 5971 if (!mddev->serial_info_pool) { 5972 err = -ENOMEM; 5973 goto bitmap_abort; 5974 } 5975 } 5976 } 5977 5978 if (mddev->queue) { 5979 bool nonrot = true; 5980 5981 rdev_for_each(rdev, mddev) { 5982 if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) { 5983 nonrot = false; 5984 break; 5985 } 5986 } 5987 if (mddev->degraded) 5988 nonrot = false; 5989 if (nonrot) 5990 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); 5991 else 5992 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); 5993 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); 5994 5995 /* Set the NOWAIT flags if all underlying devices support it */ 5996 if (nowait) 5997 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); 5998 } 5999 if (pers->sync_request) { 6000 if (mddev->kobj.sd && 6001 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 6002 pr_warn("md: cannot register extra attributes for %s\n", 6003 mdname(mddev)); 6004 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 6005 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 6006 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 6007 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 6008 mddev->ro = 0; 6009 6010 atomic_set(&mddev->max_corr_read_errors, 6011 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 6012 mddev->safemode = 0; 6013 if (mddev_is_clustered(mddev)) 6014 mddev->safemode_delay = 0; 6015 else 6016 
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 6017 mddev->in_sync = 1; 6018 smp_wmb(); 6019 spin_lock(&mddev->lock); 6020 mddev->pers = pers; 6021 spin_unlock(&mddev->lock); 6022 rdev_for_each(rdev, mddev) 6023 if (rdev->raid_disk >= 0) 6024 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ 6025 6026 if (mddev->degraded && !mddev->ro) 6027 /* This ensures that recovering status is reported immediately 6028 * via sysfs - until a lack of spares is confirmed. 6029 */ 6030 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6031 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6032 6033 if (mddev->sb_flags) 6034 md_update_sb(mddev, 0); 6035 6036 md_new_event(); 6037 return 0; 6038 6039 bitmap_abort: 6040 mddev_detach(mddev); 6041 if (mddev->private) 6042 pers->free(mddev, mddev->private); 6043 mddev->private = NULL; 6044 module_put(pers->owner); 6045 md_bitmap_destroy(mddev); 6046 abort: 6047 bioset_exit(&mddev->sync_set); 6048 exit_bio_set: 6049 bioset_exit(&mddev->bio_set); 6050 return err; 6051 } 6052 EXPORT_SYMBOL_GPL(md_run); 6053 6054 int do_md_run(struct mddev *mddev) 6055 { 6056 int err; 6057 6058 set_bit(MD_NOT_READY, &mddev->flags); 6059 err = md_run(mddev); 6060 if (err) 6061 goto out; 6062 err = md_bitmap_load(mddev); 6063 if (err) { 6064 md_bitmap_destroy(mddev); 6065 goto out; 6066 } 6067 6068 if (mddev_is_clustered(mddev)) 6069 md_allow_write(mddev); 6070 6071 /* run start up tasks that require md_thread */ 6072 md_start(mddev); 6073 6074 md_wakeup_thread(mddev->thread); 6075 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 6076 6077 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); 6078 clear_bit(MD_NOT_READY, &mddev->flags); 6079 mddev->changed = 1; 6080 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 6081 sysfs_notify_dirent_safe(mddev->sysfs_state); 6082 sysfs_notify_dirent_safe(mddev->sysfs_action); 6083 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 6084 out: 6085 clear_bit(MD_NOT_READY, &mddev->flags); 6086 return err; 6087 } 6088 6089 int md_start(struct mddev *mddev) 6090 { 6091 int ret = 0; 6092 6093 if (mddev->pers->start) { 6094 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6095 md_wakeup_thread(mddev->thread); 6096 ret = mddev->pers->start(mddev); 6097 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6098 md_wakeup_thread(mddev->sync_thread); 6099 } 6100 return ret; 6101 } 6102 EXPORT_SYMBOL_GPL(md_start); 6103 6104 static int restart_array(struct mddev *mddev) 6105 { 6106 struct gendisk *disk = mddev->gendisk; 6107 struct md_rdev *rdev; 6108 bool has_journal = false; 6109 bool has_readonly = false; 6110 6111 /* Complain if it has no devices */ 6112 if (list_empty(&mddev->disks)) 6113 return -ENXIO; 6114 if (!mddev->pers) 6115 return -EINVAL; 6116 if (!mddev->ro) 6117 return -EBUSY; 6118 6119 rcu_read_lock(); 6120 rdev_for_each_rcu(rdev, mddev) { 6121 if (test_bit(Journal, &rdev->flags) && 6122 !test_bit(Faulty, &rdev->flags)) 6123 has_journal = true; 6124 if (rdev_read_only(rdev)) 6125 has_readonly = true; 6126 } 6127 rcu_read_unlock(); 6128 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) 6129 /* Don't restart rw with journal missing/faulty */ 6130 return -EINVAL; 6131 if (has_readonly) 6132 return -EROFS; 6133 6134 mddev->safemode = 0; 6135 mddev->ro = 0; 6136 set_disk_ro(disk, 0); 6137 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 6138 /* Kick recovery or resync if necessary */ 6139 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6140 md_wakeup_thread(mddev->thread); 6141 
md_wakeup_thread(mddev->sync_thread); 6142 sysfs_notify_dirent_safe(mddev->sysfs_state); 6143 return 0; 6144 } 6145 6146 static void md_clean(struct mddev *mddev) 6147 { 6148 mddev->array_sectors = 0; 6149 mddev->external_size = 0; 6150 mddev->dev_sectors = 0; 6151 mddev->raid_disks = 0; 6152 mddev->recovery_cp = 0; 6153 mddev->resync_min = 0; 6154 mddev->resync_max = MaxSector; 6155 mddev->reshape_position = MaxSector; 6156 mddev->external = 0; 6157 mddev->persistent = 0; 6158 mddev->level = LEVEL_NONE; 6159 mddev->clevel[0] = 0; 6160 mddev->flags = 0; 6161 mddev->sb_flags = 0; 6162 mddev->ro = 0; 6163 mddev->metadata_type[0] = 0; 6164 mddev->chunk_sectors = 0; 6165 mddev->ctime = mddev->utime = 0; 6166 mddev->layout = 0; 6167 mddev->max_disks = 0; 6168 mddev->events = 0; 6169 mddev->can_decrease_events = 0; 6170 mddev->delta_disks = 0; 6171 mddev->reshape_backwards = 0; 6172 mddev->new_level = LEVEL_NONE; 6173 mddev->new_layout = 0; 6174 mddev->new_chunk_sectors = 0; 6175 mddev->curr_resync = 0; 6176 atomic64_set(&mddev->resync_mismatches, 0); 6177 mddev->suspend_lo = mddev->suspend_hi = 0; 6178 mddev->sync_speed_min = mddev->sync_speed_max = 0; 6179 mddev->recovery = 0; 6180 mddev->in_sync = 0; 6181 mddev->changed = 0; 6182 mddev->degraded = 0; 6183 mddev->safemode = 0; 6184 mddev->private = NULL; 6185 mddev->cluster_info = NULL; 6186 mddev->bitmap_info.offset = 0; 6187 mddev->bitmap_info.default_offset = 0; 6188 mddev->bitmap_info.default_space = 0; 6189 mddev->bitmap_info.chunksize = 0; 6190 mddev->bitmap_info.daemon_sleep = 0; 6191 mddev->bitmap_info.max_write_behind = 0; 6192 mddev->bitmap_info.nodes = 0; 6193 } 6194 6195 static void __md_stop_writes(struct mddev *mddev) 6196 { 6197 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6198 if (work_pending(&mddev->del_work)) 6199 flush_workqueue(md_misc_wq); 6200 if (mddev->sync_thread) { 6201 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6202 md_unregister_thread(&mddev->sync_thread); 6203 md_reap_sync_thread(mddev); 6204 } 6205 6206 del_timer_sync(&mddev->safemode_timer); 6207 6208 if (mddev->pers && mddev->pers->quiesce) { 6209 mddev->pers->quiesce(mddev, 1); 6210 mddev->pers->quiesce(mddev, 0); 6211 } 6212 md_bitmap_flush(mddev); 6213 6214 if (mddev->ro == 0 && 6215 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 6216 mddev->sb_flags)) { 6217 /* mark array as shutdown cleanly */ 6218 if (!mddev_is_clustered(mddev)) 6219 mddev->in_sync = 1; 6220 md_update_sb(mddev, 1); 6221 } 6222 /* disable policy to guarantee rdevs free resources for serialization */ 6223 mddev->serialize_policy = 0; 6224 mddev_destroy_serial_pool(mddev, NULL, true); 6225 } 6226 6227 void md_stop_writes(struct mddev *mddev) 6228 { 6229 mddev_lock_nointr(mddev); 6230 __md_stop_writes(mddev); 6231 mddev_unlock(mddev); 6232 } 6233 EXPORT_SYMBOL_GPL(md_stop_writes); 6234 6235 static void mddev_detach(struct mddev *mddev) 6236 { 6237 md_bitmap_wait_behind_writes(mddev); 6238 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { 6239 mddev->pers->quiesce(mddev, 1); 6240 mddev->pers->quiesce(mddev, 0); 6241 } 6242 md_unregister_thread(&mddev->thread); 6243 if (mddev->queue) 6244 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 6245 } 6246 6247 static void __md_stop(struct mddev *mddev) 6248 { 6249 struct md_personality *pers = mddev->pers; 6250 md_bitmap_destroy(mddev); 6251 mddev_detach(mddev); 6252 /* Ensure ->event_work is done */ 6253 if (mddev->event_work.func) 6254 flush_workqueue(md_misc_wq); 6255 spin_lock(&mddev->lock); 6256 
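	/* Clearing ->pers under ->lock pairs with md_run(), which sets it
	 * under the same lock, and with readers such as md_seq_show() that
	 * test ->pers while holding ->lock.
	 */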
mddev->pers = NULL; 6257 spin_unlock(&mddev->lock); 6258 if (mddev->private) 6259 pers->free(mddev, mddev->private); 6260 mddev->private = NULL; 6261 if (pers->sync_request && mddev->to_remove == NULL) 6262 mddev->to_remove = &md_redundancy_group; 6263 module_put(pers->owner); 6264 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6265 } 6266 6267 void md_stop(struct mddev *mddev) 6268 { 6269 /* stop the array and free an attached data structures. 6270 * This is called from dm-raid 6271 */ 6272 __md_stop(mddev); 6273 bioset_exit(&mddev->bio_set); 6274 bioset_exit(&mddev->sync_set); 6275 } 6276 6277 EXPORT_SYMBOL_GPL(md_stop); 6278 6279 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 6280 { 6281 int err = 0; 6282 int did_freeze = 0; 6283 6284 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6285 did_freeze = 1; 6286 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6287 md_wakeup_thread(mddev->thread); 6288 } 6289 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6290 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6291 if (mddev->sync_thread) 6292 /* Thread might be blocked waiting for metadata update 6293 * which will now never happen */ 6294 wake_up_process(mddev->sync_thread->tsk); 6295 6296 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 6297 return -EBUSY; 6298 mddev_unlock(mddev); 6299 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 6300 &mddev->recovery)); 6301 wait_event(mddev->sb_wait, 6302 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 6303 mddev_lock_nointr(mddev); 6304 6305 mutex_lock(&mddev->open_mutex); 6306 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6307 mddev->sync_thread || 6308 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6309 pr_warn("md: %s still in use.\n",mdname(mddev)); 6310 if (did_freeze) { 6311 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6312 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6313 md_wakeup_thread(mddev->thread); 6314 } 6315 err = -EBUSY; 6316 goto out; 6317 } 6318 if (mddev->pers) { 6319 __md_stop_writes(mddev); 6320 6321 err = -ENXIO; 6322 if (mddev->ro==1) 6323 goto out; 6324 mddev->ro = 1; 6325 set_disk_ro(mddev->gendisk, 1); 6326 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6327 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6328 md_wakeup_thread(mddev->thread); 6329 sysfs_notify_dirent_safe(mddev->sysfs_state); 6330 err = 0; 6331 } 6332 out: 6333 mutex_unlock(&mddev->open_mutex); 6334 return err; 6335 } 6336 6337 /* mode: 6338 * 0 - completely stop and dis-assemble array 6339 * 2 - stop but do not disassemble array 6340 */ 6341 static int do_md_stop(struct mddev *mddev, int mode, 6342 struct block_device *bdev) 6343 { 6344 struct gendisk *disk = mddev->gendisk; 6345 struct md_rdev *rdev; 6346 int did_freeze = 0; 6347 6348 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6349 did_freeze = 1; 6350 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6351 md_wakeup_thread(mddev->thread); 6352 } 6353 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6354 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6355 if (mddev->sync_thread) 6356 /* Thread might be blocked waiting for metadata update 6357 * which will now never happen */ 6358 wake_up_process(mddev->sync_thread->tsk); 6359 6360 mddev_unlock(mddev); 6361 wait_event(resync_wait, (mddev->sync_thread == NULL && 6362 !test_bit(MD_RECOVERY_RUNNING, 6363 &mddev->recovery))); 6364 mddev_lock_nointr(mddev); 6365 6366 mutex_lock(&mddev->open_mutex); 6367 if ((mddev->pers && atomic_read(&mddev->openers) > 
!!bdev) || 6368 mddev->sysfs_active || 6369 mddev->sync_thread || 6370 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6371 pr_warn("md: %s still in use.\n",mdname(mddev)); 6372 mutex_unlock(&mddev->open_mutex); 6373 if (did_freeze) { 6374 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6375 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6376 md_wakeup_thread(mddev->thread); 6377 } 6378 return -EBUSY; 6379 } 6380 if (mddev->pers) { 6381 if (mddev->ro) 6382 set_disk_ro(disk, 0); 6383 6384 __md_stop_writes(mddev); 6385 __md_stop(mddev); 6386 6387 /* tell userspace to handle 'inactive' */ 6388 sysfs_notify_dirent_safe(mddev->sysfs_state); 6389 6390 rdev_for_each(rdev, mddev) 6391 if (rdev->raid_disk >= 0) 6392 sysfs_unlink_rdev(mddev, rdev); 6393 6394 set_capacity_and_notify(disk, 0); 6395 mutex_unlock(&mddev->open_mutex); 6396 mddev->changed = 1; 6397 6398 if (mddev->ro) 6399 mddev->ro = 0; 6400 } else 6401 mutex_unlock(&mddev->open_mutex); 6402 /* 6403 * Free resources if final stop 6404 */ 6405 if (mode == 0) { 6406 pr_info("md: %s stopped.\n", mdname(mddev)); 6407 6408 if (mddev->bitmap_info.file) { 6409 struct file *f = mddev->bitmap_info.file; 6410 spin_lock(&mddev->lock); 6411 mddev->bitmap_info.file = NULL; 6412 spin_unlock(&mddev->lock); 6413 fput(f); 6414 } 6415 mddev->bitmap_info.offset = 0; 6416 6417 export_array(mddev); 6418 6419 md_clean(mddev); 6420 if (mddev->hold_active == UNTIL_STOP) 6421 mddev->hold_active = 0; 6422 } 6423 md_new_event(); 6424 sysfs_notify_dirent_safe(mddev->sysfs_state); 6425 return 0; 6426 } 6427 6428 #ifndef MODULE 6429 static void autorun_array(struct mddev *mddev) 6430 { 6431 struct md_rdev *rdev; 6432 int err; 6433 6434 if (list_empty(&mddev->disks)) 6435 return; 6436 6437 pr_info("md: running: "); 6438 6439 rdev_for_each(rdev, mddev) { 6440 pr_cont("<%pg>", rdev->bdev); 6441 } 6442 pr_cont("\n"); 6443 6444 err = do_md_run(mddev); 6445 if (err) { 6446 pr_warn("md: do_md_run() returned %d\n", err); 6447 do_md_stop(mddev, 0, NULL); 6448 } 6449 } 6450 6451 /* 6452 * lets try to run arrays based on all disks that have arrived 6453 * until now. (those are in pending_raid_disks) 6454 * 6455 * the method: pick the first pending disk, collect all disks with 6456 * the same UUID, remove all from the pending list and put them into 6457 * the 'same_array' list. Then order this list based on superblock 6458 * update time (freshest comes first), kick out 'old' disks and 6459 * compare superblocks. If everything's fine then run it. 6460 * 6461 * If "unit" is allocated, then bump its reference count 6462 */ 6463 static void autorun_devices(int part) 6464 { 6465 struct md_rdev *rdev0, *rdev, *tmp; 6466 struct mddev *mddev; 6467 6468 pr_info("md: autorun ...\n"); 6469 while (!list_empty(&pending_raid_disks)) { 6470 int unit; 6471 dev_t dev; 6472 LIST_HEAD(candidates); 6473 rdev0 = list_entry(pending_raid_disks.next, 6474 struct md_rdev, same_set); 6475 6476 pr_debug("md: considering %pg ...\n", rdev0->bdev); 6477 INIT_LIST_HEAD(&candidates); 6478 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 6479 if (super_90_load(rdev, rdev0, 0) >= 0) { 6480 pr_debug("md: adding %pg ...\n", 6481 rdev->bdev); 6482 list_move(&rdev->same_set, &candidates); 6483 } 6484 /* 6485 * now we have a set of devices, with all of them having 6486 * mostly sane superblocks. It's time to allocate the 6487 * mddev. 
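		 *
		 * The device number is derived from the superblock's
		 * preferred_minor: e.g. preferred_minor == 3 becomes
		 * MKDEV(MD_MAJOR, 3) (conventionally /dev/md3), or
		 * MKDEV(mdp_major, 3 << MdpMinorShift) for partitionable
		 * arrays.  (Example number only.)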
6488 */ 6489 if (part) { 6490 dev = MKDEV(mdp_major, 6491 rdev0->preferred_minor << MdpMinorShift); 6492 unit = MINOR(dev) >> MdpMinorShift; 6493 } else { 6494 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 6495 unit = MINOR(dev); 6496 } 6497 if (rdev0->preferred_minor != unit) { 6498 pr_warn("md: unit number in %pg is bad: %d\n", 6499 rdev0->bdev, rdev0->preferred_minor); 6500 break; 6501 } 6502 6503 md_probe(dev); 6504 mddev = mddev_find(dev); 6505 if (!mddev) 6506 break; 6507 6508 if (mddev_lock(mddev)) 6509 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 6510 else if (mddev->raid_disks || mddev->major_version 6511 || !list_empty(&mddev->disks)) { 6512 pr_warn("md: %s already running, cannot run %pg\n", 6513 mdname(mddev), rdev0->bdev); 6514 mddev_unlock(mddev); 6515 } else { 6516 pr_debug("md: created %s\n", mdname(mddev)); 6517 mddev->persistent = 1; 6518 rdev_for_each_list(rdev, tmp, &candidates) { 6519 list_del_init(&rdev->same_set); 6520 if (bind_rdev_to_array(rdev, mddev)) 6521 export_rdev(rdev); 6522 } 6523 autorun_array(mddev); 6524 mddev_unlock(mddev); 6525 } 6526 /* on success, candidates will be empty, on error 6527 * it won't... 6528 */ 6529 rdev_for_each_list(rdev, tmp, &candidates) { 6530 list_del_init(&rdev->same_set); 6531 export_rdev(rdev); 6532 } 6533 mddev_put(mddev); 6534 } 6535 pr_info("md: ... autorun DONE.\n"); 6536 } 6537 #endif /* !MODULE */ 6538 6539 static int get_version(void __user *arg) 6540 { 6541 mdu_version_t ver; 6542 6543 ver.major = MD_MAJOR_VERSION; 6544 ver.minor = MD_MINOR_VERSION; 6545 ver.patchlevel = MD_PATCHLEVEL_VERSION; 6546 6547 if (copy_to_user(arg, &ver, sizeof(ver))) 6548 return -EFAULT; 6549 6550 return 0; 6551 } 6552 6553 static int get_array_info(struct mddev *mddev, void __user *arg) 6554 { 6555 mdu_array_info_t info; 6556 int nr,working,insync,failed,spare; 6557 struct md_rdev *rdev; 6558 6559 nr = working = insync = failed = spare = 0; 6560 rcu_read_lock(); 6561 rdev_for_each_rcu(rdev, mddev) { 6562 nr++; 6563 if (test_bit(Faulty, &rdev->flags)) 6564 failed++; 6565 else { 6566 working++; 6567 if (test_bit(In_sync, &rdev->flags)) 6568 insync++; 6569 else if (test_bit(Journal, &rdev->flags)) 6570 /* TODO: add journal count to md_u.h */ 6571 ; 6572 else 6573 spare++; 6574 } 6575 } 6576 rcu_read_unlock(); 6577 6578 info.major_version = mddev->major_version; 6579 info.minor_version = mddev->minor_version; 6580 info.patch_version = MD_PATCHLEVEL_VERSION; 6581 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 6582 info.level = mddev->level; 6583 info.size = mddev->dev_sectors / 2; 6584 if (info.size != mddev->dev_sectors / 2) /* overflow */ 6585 info.size = -1; 6586 info.nr_disks = nr; 6587 info.raid_disks = mddev->raid_disks; 6588 info.md_minor = mddev->md_minor; 6589 info.not_persistent= !mddev->persistent; 6590 6591 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 6592 info.state = 0; 6593 if (mddev->in_sync) 6594 info.state = (1<<MD_SB_CLEAN); 6595 if (mddev->bitmap && mddev->bitmap_info.offset) 6596 info.state |= (1<<MD_SB_BITMAP_PRESENT); 6597 if (mddev_is_clustered(mddev)) 6598 info.state |= (1<<MD_SB_CLUSTERED); 6599 info.active_disks = insync; 6600 info.working_disks = working; 6601 info.failed_disks = failed; 6602 info.spare_disks = spare; 6603 6604 info.layout = mddev->layout; 6605 info.chunk_size = mddev->chunk_sectors << 9; 6606 6607 if (copy_to_user(arg, &info, sizeof(info))) 6608 return -EFAULT; 6609 6610 return 0; 6611 } 6612 6613 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 
6614 { 6615 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 6616 char *ptr; 6617 int err; 6618 6619 file = kzalloc(sizeof(*file), GFP_NOIO); 6620 if (!file) 6621 return -ENOMEM; 6622 6623 err = 0; 6624 spin_lock(&mddev->lock); 6625 /* bitmap enabled */ 6626 if (mddev->bitmap_info.file) { 6627 ptr = file_path(mddev->bitmap_info.file, file->pathname, 6628 sizeof(file->pathname)); 6629 if (IS_ERR(ptr)) 6630 err = PTR_ERR(ptr); 6631 else 6632 memmove(file->pathname, ptr, 6633 sizeof(file->pathname)-(ptr-file->pathname)); 6634 } 6635 spin_unlock(&mddev->lock); 6636 6637 if (err == 0 && 6638 copy_to_user(arg, file, sizeof(*file))) 6639 err = -EFAULT; 6640 6641 kfree(file); 6642 return err; 6643 } 6644 6645 static int get_disk_info(struct mddev *mddev, void __user * arg) 6646 { 6647 mdu_disk_info_t info; 6648 struct md_rdev *rdev; 6649 6650 if (copy_from_user(&info, arg, sizeof(info))) 6651 return -EFAULT; 6652 6653 rcu_read_lock(); 6654 rdev = md_find_rdev_nr_rcu(mddev, info.number); 6655 if (rdev) { 6656 info.major = MAJOR(rdev->bdev->bd_dev); 6657 info.minor = MINOR(rdev->bdev->bd_dev); 6658 info.raid_disk = rdev->raid_disk; 6659 info.state = 0; 6660 if (test_bit(Faulty, &rdev->flags)) 6661 info.state |= (1<<MD_DISK_FAULTY); 6662 else if (test_bit(In_sync, &rdev->flags)) { 6663 info.state |= (1<<MD_DISK_ACTIVE); 6664 info.state |= (1<<MD_DISK_SYNC); 6665 } 6666 if (test_bit(Journal, &rdev->flags)) 6667 info.state |= (1<<MD_DISK_JOURNAL); 6668 if (test_bit(WriteMostly, &rdev->flags)) 6669 info.state |= (1<<MD_DISK_WRITEMOSTLY); 6670 if (test_bit(FailFast, &rdev->flags)) 6671 info.state |= (1<<MD_DISK_FAILFAST); 6672 } else { 6673 info.major = info.minor = 0; 6674 info.raid_disk = -1; 6675 info.state = (1<<MD_DISK_REMOVED); 6676 } 6677 rcu_read_unlock(); 6678 6679 if (copy_to_user(arg, &info, sizeof(info))) 6680 return -EFAULT; 6681 6682 return 0; 6683 } 6684 6685 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) 6686 { 6687 struct md_rdev *rdev; 6688 dev_t dev = MKDEV(info->major,info->minor); 6689 6690 if (mddev_is_clustered(mddev) && 6691 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 6692 pr_warn("%s: Cannot add to clustered mddev.\n", 6693 mdname(mddev)); 6694 return -EINVAL; 6695 } 6696 6697 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 6698 return -EOVERFLOW; 6699 6700 if (!mddev->raid_disks) { 6701 int err; 6702 /* expecting a device which has a superblock */ 6703 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 6704 if (IS_ERR(rdev)) { 6705 pr_warn("md: md_import_device returned %ld\n", 6706 PTR_ERR(rdev)); 6707 return PTR_ERR(rdev); 6708 } 6709 if (!list_empty(&mddev->disks)) { 6710 struct md_rdev *rdev0 6711 = list_entry(mddev->disks.next, 6712 struct md_rdev, same_set); 6713 err = super_types[mddev->major_version] 6714 .load_super(rdev, rdev0, mddev->minor_version); 6715 if (err < 0) { 6716 pr_warn("md: %pg has different UUID to %pg\n", 6717 rdev->bdev, 6718 rdev0->bdev); 6719 export_rdev(rdev); 6720 return -EINVAL; 6721 } 6722 } 6723 err = bind_rdev_to_array(rdev, mddev); 6724 if (err) 6725 export_rdev(rdev); 6726 return err; 6727 } 6728 6729 /* 6730 * md_add_new_disk can be used once the array is assembled 6731 * to add "hot spares". 
They must already have a superblock 6732 * written 6733 */ 6734 if (mddev->pers) { 6735 int err; 6736 if (!mddev->pers->hot_add_disk) { 6737 pr_warn("%s: personality does not support diskops!\n", 6738 mdname(mddev)); 6739 return -EINVAL; 6740 } 6741 if (mddev->persistent) 6742 rdev = md_import_device(dev, mddev->major_version, 6743 mddev->minor_version); 6744 else 6745 rdev = md_import_device(dev, -1, -1); 6746 if (IS_ERR(rdev)) { 6747 pr_warn("md: md_import_device returned %ld\n", 6748 PTR_ERR(rdev)); 6749 return PTR_ERR(rdev); 6750 } 6751 /* set saved_raid_disk if appropriate */ 6752 if (!mddev->persistent) { 6753 if (info->state & (1<<MD_DISK_SYNC) && 6754 info->raid_disk < mddev->raid_disks) { 6755 rdev->raid_disk = info->raid_disk; 6756 set_bit(In_sync, &rdev->flags); 6757 clear_bit(Bitmap_sync, &rdev->flags); 6758 } else 6759 rdev->raid_disk = -1; 6760 rdev->saved_raid_disk = rdev->raid_disk; 6761 } else 6762 super_types[mddev->major_version]. 6763 validate_super(mddev, rdev); 6764 if ((info->state & (1<<MD_DISK_SYNC)) && 6765 rdev->raid_disk != info->raid_disk) { 6766 /* This was a hot-add request, but events doesn't 6767 * match, so reject it. 6768 */ 6769 export_rdev(rdev); 6770 return -EINVAL; 6771 } 6772 6773 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 6774 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6775 set_bit(WriteMostly, &rdev->flags); 6776 else 6777 clear_bit(WriteMostly, &rdev->flags); 6778 if (info->state & (1<<MD_DISK_FAILFAST)) 6779 set_bit(FailFast, &rdev->flags); 6780 else 6781 clear_bit(FailFast, &rdev->flags); 6782 6783 if (info->state & (1<<MD_DISK_JOURNAL)) { 6784 struct md_rdev *rdev2; 6785 bool has_journal = false; 6786 6787 /* make sure no existing journal disk */ 6788 rdev_for_each(rdev2, mddev) { 6789 if (test_bit(Journal, &rdev2->flags)) { 6790 has_journal = true; 6791 break; 6792 } 6793 } 6794 if (has_journal || mddev->bitmap) { 6795 export_rdev(rdev); 6796 return -EBUSY; 6797 } 6798 set_bit(Journal, &rdev->flags); 6799 } 6800 /* 6801 * check whether the device shows up in other nodes 6802 */ 6803 if (mddev_is_clustered(mddev)) { 6804 if (info->state & (1 << MD_DISK_CANDIDATE)) 6805 set_bit(Candidate, &rdev->flags); 6806 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 6807 /* --add initiated by this node */ 6808 err = md_cluster_ops->add_new_disk(mddev, rdev); 6809 if (err) { 6810 export_rdev(rdev); 6811 return err; 6812 } 6813 } 6814 } 6815 6816 rdev->raid_disk = -1; 6817 err = bind_rdev_to_array(rdev, mddev); 6818 6819 if (err) 6820 export_rdev(rdev); 6821 6822 if (mddev_is_clustered(mddev)) { 6823 if (info->state & (1 << MD_DISK_CANDIDATE)) { 6824 if (!err) { 6825 err = md_cluster_ops->new_disk_ack(mddev, 6826 err == 0); 6827 if (err) 6828 md_kick_rdev_from_array(rdev); 6829 } 6830 } else { 6831 if (err) 6832 md_cluster_ops->add_new_disk_cancel(mddev); 6833 else 6834 err = add_bound_rdev(rdev); 6835 } 6836 6837 } else if (!err) 6838 err = add_bound_rdev(rdev); 6839 6840 return err; 6841 } 6842 6843 /* otherwise, md_add_new_disk is only allowed 6844 * for major_version==0 superblocks 6845 */ 6846 if (mddev->major_version != 0) { 6847 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 6848 return -EINVAL; 6849 } 6850 6851 if (!(info->state & (1<<MD_DISK_FAULTY))) { 6852 int err; 6853 rdev = md_import_device(dev, -1, 0); 6854 if (IS_ERR(rdev)) { 6855 pr_warn("md: error, md_import_device() returned %ld\n", 6856 PTR_ERR(rdev)); 6857 return PTR_ERR(rdev); 6858 } 6859 rdev->desc_nr = info->number; 6860 if (info->raid_disk < 
mddev->raid_disks) 6861 rdev->raid_disk = info->raid_disk; 6862 else 6863 rdev->raid_disk = -1; 6864 6865 if (rdev->raid_disk < mddev->raid_disks) 6866 if (info->state & (1<<MD_DISK_SYNC)) 6867 set_bit(In_sync, &rdev->flags); 6868 6869 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6870 set_bit(WriteMostly, &rdev->flags); 6871 if (info->state & (1<<MD_DISK_FAILFAST)) 6872 set_bit(FailFast, &rdev->flags); 6873 6874 if (!mddev->persistent) { 6875 pr_debug("md: nonpersistent superblock ...\n"); 6876 rdev->sb_start = bdev_nr_sectors(rdev->bdev); 6877 } else 6878 rdev->sb_start = calc_dev_sboffset(rdev); 6879 rdev->sectors = rdev->sb_start; 6880 6881 err = bind_rdev_to_array(rdev, mddev); 6882 if (err) { 6883 export_rdev(rdev); 6884 return err; 6885 } 6886 } 6887 6888 return 0; 6889 } 6890 6891 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6892 { 6893 struct md_rdev *rdev; 6894 6895 if (!mddev->pers) 6896 return -ENODEV; 6897 6898 rdev = find_rdev(mddev, dev); 6899 if (!rdev) 6900 return -ENXIO; 6901 6902 if (rdev->raid_disk < 0) 6903 goto kick_rdev; 6904 6905 clear_bit(Blocked, &rdev->flags); 6906 remove_and_add_spares(mddev, rdev); 6907 6908 if (rdev->raid_disk >= 0) 6909 goto busy; 6910 6911 kick_rdev: 6912 if (mddev_is_clustered(mddev)) { 6913 if (md_cluster_ops->remove_disk(mddev, rdev)) 6914 goto busy; 6915 } 6916 6917 md_kick_rdev_from_array(rdev); 6918 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6919 if (mddev->thread) 6920 md_wakeup_thread(mddev->thread); 6921 else 6922 md_update_sb(mddev, 1); 6923 md_new_event(); 6924 6925 return 0; 6926 busy: 6927 pr_debug("md: cannot remove active disk %pg from %s ...\n", 6928 rdev->bdev, mdname(mddev)); 6929 return -EBUSY; 6930 } 6931 6932 static int hot_add_disk(struct mddev *mddev, dev_t dev) 6933 { 6934 int err; 6935 struct md_rdev *rdev; 6936 6937 if (!mddev->pers) 6938 return -ENODEV; 6939 6940 if (mddev->major_version != 0) { 6941 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 6942 mdname(mddev)); 6943 return -EINVAL; 6944 } 6945 if (!mddev->pers->hot_add_disk) { 6946 pr_warn("%s: personality does not support diskops!\n", 6947 mdname(mddev)); 6948 return -EINVAL; 6949 } 6950 6951 rdev = md_import_device(dev, -1, 0); 6952 if (IS_ERR(rdev)) { 6953 pr_warn("md: error, md_import_device() returned %ld\n", 6954 PTR_ERR(rdev)); 6955 return -EINVAL; 6956 } 6957 6958 if (mddev->persistent) 6959 rdev->sb_start = calc_dev_sboffset(rdev); 6960 else 6961 rdev->sb_start = bdev_nr_sectors(rdev->bdev); 6962 6963 rdev->sectors = rdev->sb_start; 6964 6965 if (test_bit(Faulty, &rdev->flags)) { 6966 pr_warn("md: can not hot-add faulty %pg disk to %s!\n", 6967 rdev->bdev, mdname(mddev)); 6968 err = -EINVAL; 6969 goto abort_export; 6970 } 6971 6972 clear_bit(In_sync, &rdev->flags); 6973 rdev->desc_nr = -1; 6974 rdev->saved_raid_disk = -1; 6975 err = bind_rdev_to_array(rdev, mddev); 6976 if (err) 6977 goto abort_export; 6978 6979 /* 6980 * The rest should better be atomic, we can have disk failures 6981 * noticed in interrupt contexts ... 6982 */ 6983 6984 rdev->raid_disk = -1; 6985 6986 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6987 if (!mddev->thread) 6988 md_update_sb(mddev, 1); 6989 /* 6990 * If the new disk does not support REQ_NOWAIT, 6991 * disable on the whole MD. 
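	 * This mirrors md_run(), which only sets QUEUE_FLAG_NOWAIT when
	 * every member supports nowait; hot-adding a device without that
	 * support clears the flag for the whole array again.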
6992 */ 6993 if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) { 6994 pr_info("%s: Disabling nowait because %pg does not support nowait\n", 6995 mdname(mddev), rdev->bdev); 6996 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue); 6997 } 6998 /* 6999 * Kick recovery, maybe this spare has to be added to the 7000 * array immediately. 7001 */ 7002 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7003 md_wakeup_thread(mddev->thread); 7004 md_new_event(); 7005 return 0; 7006 7007 abort_export: 7008 export_rdev(rdev); 7009 return err; 7010 } 7011 7012 static int set_bitmap_file(struct mddev *mddev, int fd) 7013 { 7014 int err = 0; 7015 7016 if (mddev->pers) { 7017 if (!mddev->pers->quiesce || !mddev->thread) 7018 return -EBUSY; 7019 if (mddev->recovery || mddev->sync_thread) 7020 return -EBUSY; 7021 /* we should be able to change the bitmap.. */ 7022 } 7023 7024 if (fd >= 0) { 7025 struct inode *inode; 7026 struct file *f; 7027 7028 if (mddev->bitmap || mddev->bitmap_info.file) 7029 return -EEXIST; /* cannot add when bitmap is present */ 7030 f = fget(fd); 7031 7032 if (f == NULL) { 7033 pr_warn("%s: error: failed to get bitmap file\n", 7034 mdname(mddev)); 7035 return -EBADF; 7036 } 7037 7038 inode = f->f_mapping->host; 7039 if (!S_ISREG(inode->i_mode)) { 7040 pr_warn("%s: error: bitmap file must be a regular file\n", 7041 mdname(mddev)); 7042 err = -EBADF; 7043 } else if (!(f->f_mode & FMODE_WRITE)) { 7044 pr_warn("%s: error: bitmap file must open for write\n", 7045 mdname(mddev)); 7046 err = -EBADF; 7047 } else if (atomic_read(&inode->i_writecount) != 1) { 7048 pr_warn("%s: error: bitmap file is already in use\n", 7049 mdname(mddev)); 7050 err = -EBUSY; 7051 } 7052 if (err) { 7053 fput(f); 7054 return err; 7055 } 7056 mddev->bitmap_info.file = f; 7057 mddev->bitmap_info.offset = 0; /* file overrides offset */ 7058 } else if (mddev->bitmap == NULL) 7059 return -ENOENT; /* cannot remove what isn't there */ 7060 err = 0; 7061 if (mddev->pers) { 7062 if (fd >= 0) { 7063 struct bitmap *bitmap; 7064 7065 bitmap = md_bitmap_create(mddev, -1); 7066 mddev_suspend(mddev); 7067 if (!IS_ERR(bitmap)) { 7068 mddev->bitmap = bitmap; 7069 err = md_bitmap_load(mddev); 7070 } else 7071 err = PTR_ERR(bitmap); 7072 if (err) { 7073 md_bitmap_destroy(mddev); 7074 fd = -1; 7075 } 7076 mddev_resume(mddev); 7077 } else if (fd < 0) { 7078 mddev_suspend(mddev); 7079 md_bitmap_destroy(mddev); 7080 mddev_resume(mddev); 7081 } 7082 } 7083 if (fd < 0) { 7084 struct file *f = mddev->bitmap_info.file; 7085 if (f) { 7086 spin_lock(&mddev->lock); 7087 mddev->bitmap_info.file = NULL; 7088 spin_unlock(&mddev->lock); 7089 fput(f); 7090 } 7091 } 7092 7093 return err; 7094 } 7095 7096 /* 7097 * md_set_array_info is used two different ways 7098 * The original usage is when creating a new array. 7099 * In this usage, raid_disks is > 0 and it together with 7100 * level, size, not_persistent,layout,chunksize determine the 7101 * shape of the array. 7102 * This will always create an array with a type-0.90.0 superblock. 7103 * The newer usage is when assembling an array. 7104 * In this case raid_disks will be 0, and the major_version field is 7105 * use to determine which style super-blocks are to be found on the devices. 7106 * The minor and patch _version numbers are also kept incase the 7107 * super_block handler wishes to interpret them. 
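 *
 * A hedged userspace sketch of the two usages; md_fd, the device node
 * and all field values below are illustrative assumptions, not part of
 * this file, and error handling is omitted.  Assembling only picks the
 * superblock format:
 *
 *	mdu_array_info_t info = { 0 };
 *	int md_fd = open("/dev/md0", O_RDWR);
 *	info.major_version = 1;
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 *
 * Creating a new 0.90.0 array instead shapes it explicitly:
 *
 *	info.raid_disks = 2;
 *	info.level = 1;
 *	info.size = 1048576;
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);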
7108 */ 7109 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) 7110 { 7111 if (info->raid_disks == 0) { 7112 /* just setting version number for superblock loading */ 7113 if (info->major_version < 0 || 7114 info->major_version >= ARRAY_SIZE(super_types) || 7115 super_types[info->major_version].name == NULL) { 7116 /* maybe try to auto-load a module? */ 7117 pr_warn("md: superblock version %d not known\n", 7118 info->major_version); 7119 return -EINVAL; 7120 } 7121 mddev->major_version = info->major_version; 7122 mddev->minor_version = info->minor_version; 7123 mddev->patch_version = info->patch_version; 7124 mddev->persistent = !info->not_persistent; 7125 /* ensure mddev_put doesn't delete this now that there 7126 * is some minimal configuration. 7127 */ 7128 mddev->ctime = ktime_get_real_seconds(); 7129 return 0; 7130 } 7131 mddev->major_version = MD_MAJOR_VERSION; 7132 mddev->minor_version = MD_MINOR_VERSION; 7133 mddev->patch_version = MD_PATCHLEVEL_VERSION; 7134 mddev->ctime = ktime_get_real_seconds(); 7135 7136 mddev->level = info->level; 7137 mddev->clevel[0] = 0; 7138 mddev->dev_sectors = 2 * (sector_t)info->size; 7139 mddev->raid_disks = info->raid_disks; 7140 /* don't set md_minor, it is determined by which /dev/md* was 7141 * openned 7142 */ 7143 if (info->state & (1<<MD_SB_CLEAN)) 7144 mddev->recovery_cp = MaxSector; 7145 else 7146 mddev->recovery_cp = 0; 7147 mddev->persistent = ! info->not_persistent; 7148 mddev->external = 0; 7149 7150 mddev->layout = info->layout; 7151 if (mddev->level == 0) 7152 /* Cannot trust RAID0 layout info here */ 7153 mddev->layout = -1; 7154 mddev->chunk_sectors = info->chunk_size >> 9; 7155 7156 if (mddev->persistent) { 7157 mddev->max_disks = MD_SB_DISKS; 7158 mddev->flags = 0; 7159 mddev->sb_flags = 0; 7160 } 7161 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7162 7163 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 7164 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 7165 mddev->bitmap_info.offset = 0; 7166 7167 mddev->reshape_position = MaxSector; 7168 7169 /* 7170 * Generate a 128 bit UUID 7171 */ 7172 get_random_bytes(mddev->uuid, 16); 7173 7174 mddev->new_level = mddev->level; 7175 mddev->new_chunk_sectors = mddev->chunk_sectors; 7176 mddev->new_layout = mddev->layout; 7177 mddev->delta_disks = 0; 7178 mddev->reshape_backwards = 0; 7179 7180 return 0; 7181 } 7182 7183 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 7184 { 7185 lockdep_assert_held(&mddev->reconfig_mutex); 7186 7187 if (mddev->external_size) 7188 return; 7189 7190 mddev->array_sectors = array_sectors; 7191 } 7192 EXPORT_SYMBOL(md_set_array_sectors); 7193 7194 static int update_size(struct mddev *mddev, sector_t num_sectors) 7195 { 7196 struct md_rdev *rdev; 7197 int rv; 7198 int fit = (num_sectors == 0); 7199 sector_t old_dev_sectors = mddev->dev_sectors; 7200 7201 if (mddev->pers->resize == NULL) 7202 return -EINVAL; 7203 /* The "num_sectors" is the number of sectors of each device that 7204 * is used. This can only make sense for arrays with redundancy. 7205 * linear and raid0 always use whatever space is available. We can only 7206 * consider changing this number if no resync or reconstruction is 7207 * happening, and if the new size is acceptable. It must fit before the 7208 * sb_start or, if that is <data_offset, it must fit before the size 7209 * of each device. If num_sectors is zero, we find the largest size 7210 * that fits. 
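	 *
	 * Worked example (illustrative numbers): with members offering
	 * 1000, 1200 and 900 usable sectors, num_sectors == 0 settles on
	 * 900, while an explicit request for 950 fails with -ENOSPC
	 * because the smallest member cannot hold it.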
7211 */ 7212 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7213 mddev->sync_thread) 7214 return -EBUSY; 7215 if (mddev->ro) 7216 return -EROFS; 7217 7218 rdev_for_each(rdev, mddev) { 7219 sector_t avail = rdev->sectors; 7220 7221 if (fit && (num_sectors == 0 || num_sectors > avail)) 7222 num_sectors = avail; 7223 if (avail < num_sectors) 7224 return -ENOSPC; 7225 } 7226 rv = mddev->pers->resize(mddev, num_sectors); 7227 if (!rv) { 7228 if (mddev_is_clustered(mddev)) 7229 md_cluster_ops->update_size(mddev, old_dev_sectors); 7230 else if (mddev->queue) { 7231 set_capacity_and_notify(mddev->gendisk, 7232 mddev->array_sectors); 7233 } 7234 } 7235 return rv; 7236 } 7237 7238 static int update_raid_disks(struct mddev *mddev, int raid_disks) 7239 { 7240 int rv; 7241 struct md_rdev *rdev; 7242 /* change the number of raid disks */ 7243 if (mddev->pers->check_reshape == NULL) 7244 return -EINVAL; 7245 if (mddev->ro) 7246 return -EROFS; 7247 if (raid_disks <= 0 || 7248 (mddev->max_disks && raid_disks >= mddev->max_disks)) 7249 return -EINVAL; 7250 if (mddev->sync_thread || 7251 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7252 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || 7253 mddev->reshape_position != MaxSector) 7254 return -EBUSY; 7255 7256 rdev_for_each(rdev, mddev) { 7257 if (mddev->raid_disks < raid_disks && 7258 rdev->data_offset < rdev->new_data_offset) 7259 return -EINVAL; 7260 if (mddev->raid_disks > raid_disks && 7261 rdev->data_offset > rdev->new_data_offset) 7262 return -EINVAL; 7263 } 7264 7265 mddev->delta_disks = raid_disks - mddev->raid_disks; 7266 if (mddev->delta_disks < 0) 7267 mddev->reshape_backwards = 1; 7268 else if (mddev->delta_disks > 0) 7269 mddev->reshape_backwards = 0; 7270 7271 rv = mddev->pers->check_reshape(mddev); 7272 if (rv < 0) { 7273 mddev->delta_disks = 0; 7274 mddev->reshape_backwards = 0; 7275 } 7276 return rv; 7277 } 7278 7279 /* 7280 * update_array_info is used to change the configuration of an 7281 * on-line array. 7282 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 7283 * fields in the info are checked against the array. 7284 * Any differences that cannot be handled will cause an error. 7285 * Normally, only one change can be managed at a time. 
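 *
 * For example, a request that changes both raid_disks and layout in one
 * call is rejected with -EINVAL, whereas a request changing only the
 * layout is passed on to the personality's check_reshape().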
7286 */ 7287 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 7288 { 7289 int rv = 0; 7290 int cnt = 0; 7291 int state = 0; 7292 7293 /* calculate expected state,ignoring low bits */ 7294 if (mddev->bitmap && mddev->bitmap_info.offset) 7295 state |= (1 << MD_SB_BITMAP_PRESENT); 7296 7297 if (mddev->major_version != info->major_version || 7298 mddev->minor_version != info->minor_version || 7299 /* mddev->patch_version != info->patch_version || */ 7300 mddev->ctime != info->ctime || 7301 mddev->level != info->level || 7302 /* mddev->layout != info->layout || */ 7303 mddev->persistent != !info->not_persistent || 7304 mddev->chunk_sectors != info->chunk_size >> 9 || 7305 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 7306 ((state^info->state) & 0xfffffe00) 7307 ) 7308 return -EINVAL; 7309 /* Check there is only one change */ 7310 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7311 cnt++; 7312 if (mddev->raid_disks != info->raid_disks) 7313 cnt++; 7314 if (mddev->layout != info->layout) 7315 cnt++; 7316 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 7317 cnt++; 7318 if (cnt == 0) 7319 return 0; 7320 if (cnt > 1) 7321 return -EINVAL; 7322 7323 if (mddev->layout != info->layout) { 7324 /* Change layout 7325 * we don't need to do anything at the md level, the 7326 * personality will take care of it all. 7327 */ 7328 if (mddev->pers->check_reshape == NULL) 7329 return -EINVAL; 7330 else { 7331 mddev->new_layout = info->layout; 7332 rv = mddev->pers->check_reshape(mddev); 7333 if (rv) 7334 mddev->new_layout = mddev->layout; 7335 return rv; 7336 } 7337 } 7338 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7339 rv = update_size(mddev, (sector_t)info->size * 2); 7340 7341 if (mddev->raid_disks != info->raid_disks) 7342 rv = update_raid_disks(mddev, info->raid_disks); 7343 7344 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 7345 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 7346 rv = -EINVAL; 7347 goto err; 7348 } 7349 if (mddev->recovery || mddev->sync_thread) { 7350 rv = -EBUSY; 7351 goto err; 7352 } 7353 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 7354 struct bitmap *bitmap; 7355 /* add the bitmap */ 7356 if (mddev->bitmap) { 7357 rv = -EEXIST; 7358 goto err; 7359 } 7360 if (mddev->bitmap_info.default_offset == 0) { 7361 rv = -EINVAL; 7362 goto err; 7363 } 7364 mddev->bitmap_info.offset = 7365 mddev->bitmap_info.default_offset; 7366 mddev->bitmap_info.space = 7367 mddev->bitmap_info.default_space; 7368 bitmap = md_bitmap_create(mddev, -1); 7369 mddev_suspend(mddev); 7370 if (!IS_ERR(bitmap)) { 7371 mddev->bitmap = bitmap; 7372 rv = md_bitmap_load(mddev); 7373 } else 7374 rv = PTR_ERR(bitmap); 7375 if (rv) 7376 md_bitmap_destroy(mddev); 7377 mddev_resume(mddev); 7378 } else { 7379 /* remove the bitmap */ 7380 if (!mddev->bitmap) { 7381 rv = -ENOENT; 7382 goto err; 7383 } 7384 if (mddev->bitmap->storage.file) { 7385 rv = -EINVAL; 7386 goto err; 7387 } 7388 if (mddev->bitmap_info.nodes) { 7389 /* hold PW on all the bitmap lock */ 7390 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { 7391 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 7392 rv = -EPERM; 7393 md_cluster_ops->unlock_all_bitmaps(mddev); 7394 goto err; 7395 } 7396 7397 mddev->bitmap_info.nodes = 0; 7398 md_cluster_ops->leave(mddev); 7399 module_put(md_cluster_mod); 7400 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 7401 } 7402 mddev_suspend(mddev); 7403 
md_bitmap_destroy(mddev); 7404 mddev_resume(mddev); 7405 mddev->bitmap_info.offset = 0; 7406 } 7407 } 7408 md_update_sb(mddev, 1); 7409 return rv; 7410 err: 7411 return rv; 7412 } 7413 7414 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 7415 { 7416 struct md_rdev *rdev; 7417 int err = 0; 7418 7419 if (mddev->pers == NULL) 7420 return -ENODEV; 7421 7422 rcu_read_lock(); 7423 rdev = md_find_rdev_rcu(mddev, dev); 7424 if (!rdev) 7425 err = -ENODEV; 7426 else { 7427 md_error(mddev, rdev); 7428 if (test_bit(MD_BROKEN, &mddev->flags)) 7429 err = -EBUSY; 7430 } 7431 rcu_read_unlock(); 7432 return err; 7433 } 7434 7435 /* 7436 * We have a problem here : there is no easy way to give a CHS 7437 * virtual geometry. We currently pretend that we have a 2 heads 7438 * 4 sectors (with a BIG number of cylinders...). This drives 7439 * dosfs just mad... ;-) 7440 */ 7441 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 7442 { 7443 struct mddev *mddev = bdev->bd_disk->private_data; 7444 7445 geo->heads = 2; 7446 geo->sectors = 4; 7447 geo->cylinders = mddev->array_sectors / 8; 7448 return 0; 7449 } 7450 7451 static inline bool md_ioctl_valid(unsigned int cmd) 7452 { 7453 switch (cmd) { 7454 case ADD_NEW_DISK: 7455 case GET_ARRAY_INFO: 7456 case GET_BITMAP_FILE: 7457 case GET_DISK_INFO: 7458 case HOT_ADD_DISK: 7459 case HOT_REMOVE_DISK: 7460 case RAID_VERSION: 7461 case RESTART_ARRAY_RW: 7462 case RUN_ARRAY: 7463 case SET_ARRAY_INFO: 7464 case SET_BITMAP_FILE: 7465 case SET_DISK_FAULTY: 7466 case STOP_ARRAY: 7467 case STOP_ARRAY_RO: 7468 case CLUSTERED_DISK_NACK: 7469 return true; 7470 default: 7471 return false; 7472 } 7473 } 7474 7475 static int md_ioctl(struct block_device *bdev, fmode_t mode, 7476 unsigned int cmd, unsigned long arg) 7477 { 7478 int err = 0; 7479 void __user *argp = (void __user *)arg; 7480 struct mddev *mddev = NULL; 7481 bool did_set_md_closing = false; 7482 7483 if (!md_ioctl_valid(cmd)) 7484 return -ENOTTY; 7485 7486 switch (cmd) { 7487 case RAID_VERSION: 7488 case GET_ARRAY_INFO: 7489 case GET_DISK_INFO: 7490 break; 7491 default: 7492 if (!capable(CAP_SYS_ADMIN)) 7493 return -EACCES; 7494 } 7495 7496 /* 7497 * Commands dealing with the RAID driver but not any 7498 * particular array: 7499 */ 7500 switch (cmd) { 7501 case RAID_VERSION: 7502 err = get_version(argp); 7503 goto out; 7504 default:; 7505 } 7506 7507 /* 7508 * Commands creating/starting a new array: 7509 */ 7510 7511 mddev = bdev->bd_disk->private_data; 7512 7513 if (!mddev) { 7514 BUG(); 7515 goto out; 7516 } 7517 7518 /* Some actions do not requires the mutex */ 7519 switch (cmd) { 7520 case GET_ARRAY_INFO: 7521 if (!mddev->raid_disks && !mddev->external) 7522 err = -ENODEV; 7523 else 7524 err = get_array_info(mddev, argp); 7525 goto out; 7526 7527 case GET_DISK_INFO: 7528 if (!mddev->raid_disks && !mddev->external) 7529 err = -ENODEV; 7530 else 7531 err = get_disk_info(mddev, argp); 7532 goto out; 7533 7534 case SET_DISK_FAULTY: 7535 err = set_disk_faulty(mddev, new_decode_dev(arg)); 7536 goto out; 7537 7538 case GET_BITMAP_FILE: 7539 err = get_bitmap_file(mddev, argp); 7540 goto out; 7541 7542 } 7543 7544 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK) 7545 flush_rdev_wq(mddev); 7546 7547 if (cmd == HOT_REMOVE_DISK) 7548 /* need to ensure recovery thread has run */ 7549 wait_event_interruptible_timeout(mddev->sb_wait, 7550 !test_bit(MD_RECOVERY_NEEDED, 7551 &mddev->recovery), 7552 msecs_to_jiffies(5000)); 7553 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 7554 /* Need to flush page 
cache, and ensure no-one else opens 7555 * and writes 7556 */ 7557 mutex_lock(&mddev->open_mutex); 7558 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 7559 mutex_unlock(&mddev->open_mutex); 7560 err = -EBUSY; 7561 goto out; 7562 } 7563 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { 7564 mutex_unlock(&mddev->open_mutex); 7565 err = -EBUSY; 7566 goto out; 7567 } 7568 did_set_md_closing = true; 7569 mutex_unlock(&mddev->open_mutex); 7570 sync_blockdev(bdev); 7571 } 7572 err = mddev_lock(mddev); 7573 if (err) { 7574 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 7575 err, cmd); 7576 goto out; 7577 } 7578 7579 if (cmd == SET_ARRAY_INFO) { 7580 mdu_array_info_t info; 7581 if (!arg) 7582 memset(&info, 0, sizeof(info)); 7583 else if (copy_from_user(&info, argp, sizeof(info))) { 7584 err = -EFAULT; 7585 goto unlock; 7586 } 7587 if (mddev->pers) { 7588 err = update_array_info(mddev, &info); 7589 if (err) { 7590 pr_warn("md: couldn't update array info. %d\n", err); 7591 goto unlock; 7592 } 7593 goto unlock; 7594 } 7595 if (!list_empty(&mddev->disks)) { 7596 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 7597 err = -EBUSY; 7598 goto unlock; 7599 } 7600 if (mddev->raid_disks) { 7601 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 7602 err = -EBUSY; 7603 goto unlock; 7604 } 7605 err = md_set_array_info(mddev, &info); 7606 if (err) { 7607 pr_warn("md: couldn't set array info. %d\n", err); 7608 goto unlock; 7609 } 7610 goto unlock; 7611 } 7612 7613 /* 7614 * Commands querying/configuring an existing array: 7615 */ 7616 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 7617 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 7618 if ((!mddev->raid_disks && !mddev->external) 7619 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 7620 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 7621 && cmd != GET_BITMAP_FILE) { 7622 err = -ENODEV; 7623 goto unlock; 7624 } 7625 7626 /* 7627 * Commands even a read-only array can execute: 7628 */ 7629 switch (cmd) { 7630 case RESTART_ARRAY_RW: 7631 err = restart_array(mddev); 7632 goto unlock; 7633 7634 case STOP_ARRAY: 7635 err = do_md_stop(mddev, 0, bdev); 7636 goto unlock; 7637 7638 case STOP_ARRAY_RO: 7639 err = md_set_readonly(mddev, bdev); 7640 goto unlock; 7641 7642 case HOT_REMOVE_DISK: 7643 err = hot_remove_disk(mddev, new_decode_dev(arg)); 7644 goto unlock; 7645 7646 case ADD_NEW_DISK: 7647 /* We can support ADD_NEW_DISK on read-only arrays 7648 * only if we are re-adding a preexisting device. 7649 * So require mddev->pers and MD_DISK_SYNC. 7650 */ 7651 if (mddev->pers) { 7652 mdu_disk_info_t info; 7653 if (copy_from_user(&info, argp, sizeof(info))) 7654 err = -EFAULT; 7655 else if (!(info.state & (1<<MD_DISK_SYNC))) 7656 /* Need to clear read-only for this */ 7657 break; 7658 else 7659 err = md_add_new_disk(mddev, &info); 7660 goto unlock; 7661 } 7662 break; 7663 } 7664 7665 /* 7666 * The remaining ioctls are changing the state of the 7667 * superblock, so we do not allow them on read-only arrays. 7668 */ 7669 if (mddev->ro && mddev->pers) { 7670 if (mddev->ro == 2) { 7671 mddev->ro = 0; 7672 sysfs_notify_dirent_safe(mddev->sysfs_state); 7673 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7674 /* mddev_unlock will wake thread */ 7675 /* If a device failed while we were read-only, we 7676 * need to make sure the metadata is updated now. 
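			 * That is done by dropping the mutex so the md thread
			 * can write the superblock, then waiting on sb_wait
			 * until both MD_SB_CHANGE_DEVS and MD_SB_CHANGE_PENDING
			 * clear before re-taking the lock.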
7677 */ 7678 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 7679 mddev_unlock(mddev); 7680 wait_event(mddev->sb_wait, 7681 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 7682 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 7683 mddev_lock_nointr(mddev); 7684 } 7685 } else { 7686 err = -EROFS; 7687 goto unlock; 7688 } 7689 } 7690 7691 switch (cmd) { 7692 case ADD_NEW_DISK: 7693 { 7694 mdu_disk_info_t info; 7695 if (copy_from_user(&info, argp, sizeof(info))) 7696 err = -EFAULT; 7697 else 7698 err = md_add_new_disk(mddev, &info); 7699 goto unlock; 7700 } 7701 7702 case CLUSTERED_DISK_NACK: 7703 if (mddev_is_clustered(mddev)) 7704 md_cluster_ops->new_disk_ack(mddev, false); 7705 else 7706 err = -EINVAL; 7707 goto unlock; 7708 7709 case HOT_ADD_DISK: 7710 err = hot_add_disk(mddev, new_decode_dev(arg)); 7711 goto unlock; 7712 7713 case RUN_ARRAY: 7714 err = do_md_run(mddev); 7715 goto unlock; 7716 7717 case SET_BITMAP_FILE: 7718 err = set_bitmap_file(mddev, (int)arg); 7719 goto unlock; 7720 7721 default: 7722 err = -EINVAL; 7723 goto unlock; 7724 } 7725 7726 unlock: 7727 if (mddev->hold_active == UNTIL_IOCTL && 7728 err != -EINVAL) 7729 mddev->hold_active = 0; 7730 mddev_unlock(mddev); 7731 out: 7732 if(did_set_md_closing) 7733 clear_bit(MD_CLOSING, &mddev->flags); 7734 return err; 7735 } 7736 #ifdef CONFIG_COMPAT 7737 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 7738 unsigned int cmd, unsigned long arg) 7739 { 7740 switch (cmd) { 7741 case HOT_REMOVE_DISK: 7742 case HOT_ADD_DISK: 7743 case SET_DISK_FAULTY: 7744 case SET_BITMAP_FILE: 7745 /* These take in integer arg, do not convert */ 7746 break; 7747 default: 7748 arg = (unsigned long)compat_ptr(arg); 7749 break; 7750 } 7751 7752 return md_ioctl(bdev, mode, cmd, arg); 7753 } 7754 #endif /* CONFIG_COMPAT */ 7755 7756 static int md_set_read_only(struct block_device *bdev, bool ro) 7757 { 7758 struct mddev *mddev = bdev->bd_disk->private_data; 7759 int err; 7760 7761 err = mddev_lock(mddev); 7762 if (err) 7763 return err; 7764 7765 if (!mddev->raid_disks && !mddev->external) { 7766 err = -ENODEV; 7767 goto out_unlock; 7768 } 7769 7770 /* 7771 * Transitioning to read-auto need only happen for arrays that call 7772 * md_write_start and which are not ready for writes yet. 
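	 *
	 * As a reminder of the ->ro states involved: 0 is read-write, 1 is
	 * read-only, and 2 is read-auto (read-only until the first write,
	 * as set up in md_run() when start_readonly is in effect).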
7773 */ 7774 if (!ro && mddev->ro == 1 && mddev->pers) { 7775 err = restart_array(mddev); 7776 if (err) 7777 goto out_unlock; 7778 mddev->ro = 2; 7779 } 7780 7781 out_unlock: 7782 mddev_unlock(mddev); 7783 return err; 7784 } 7785 7786 static int md_open(struct block_device *bdev, fmode_t mode) 7787 { 7788 struct mddev *mddev; 7789 int err; 7790 7791 spin_lock(&all_mddevs_lock); 7792 mddev = mddev_get(bdev->bd_disk->private_data); 7793 spin_unlock(&all_mddevs_lock); 7794 if (!mddev) 7795 return -ENODEV; 7796 7797 err = mutex_lock_interruptible(&mddev->open_mutex); 7798 if (err) 7799 goto out; 7800 7801 err = -ENODEV; 7802 if (test_bit(MD_CLOSING, &mddev->flags)) 7803 goto out_unlock; 7804 7805 atomic_inc(&mddev->openers); 7806 mutex_unlock(&mddev->open_mutex); 7807 7808 bdev_check_media_change(bdev); 7809 return 0; 7810 7811 out_unlock: 7812 mutex_unlock(&mddev->open_mutex); 7813 out: 7814 mddev_put(mddev); 7815 return err; 7816 } 7817 7818 static void md_release(struct gendisk *disk, fmode_t mode) 7819 { 7820 struct mddev *mddev = disk->private_data; 7821 7822 BUG_ON(!mddev); 7823 atomic_dec(&mddev->openers); 7824 mddev_put(mddev); 7825 } 7826 7827 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) 7828 { 7829 struct mddev *mddev = disk->private_data; 7830 unsigned int ret = 0; 7831 7832 if (mddev->changed) 7833 ret = DISK_EVENT_MEDIA_CHANGE; 7834 mddev->changed = 0; 7835 return ret; 7836 } 7837 7838 static void md_free_disk(struct gendisk *disk) 7839 { 7840 struct mddev *mddev = disk->private_data; 7841 7842 percpu_ref_exit(&mddev->writes_pending); 7843 bioset_exit(&mddev->bio_set); 7844 bioset_exit(&mddev->sync_set); 7845 7846 mddev_free(mddev); 7847 } 7848 7849 const struct block_device_operations md_fops = 7850 { 7851 .owner = THIS_MODULE, 7852 .submit_bio = md_submit_bio, 7853 .open = md_open, 7854 .release = md_release, 7855 .ioctl = md_ioctl, 7856 #ifdef CONFIG_COMPAT 7857 .compat_ioctl = md_compat_ioctl, 7858 #endif 7859 .getgeo = md_getgeo, 7860 .check_events = md_check_events, 7861 .set_read_only = md_set_read_only, 7862 .free_disk = md_free_disk, 7863 }; 7864 7865 static int md_thread(void *arg) 7866 { 7867 struct md_thread *thread = arg; 7868 7869 /* 7870 * md_thread is a 'system-thread', it's priority should be very 7871 * high. We avoid resource deadlocks individually in each 7872 * raid personality. (RAID5 does preallocation) We also use RR and 7873 * the very same RT priority as kswapd, thus we will never get 7874 * into a priority inversion deadlock. 7875 * 7876 * we definitely have to have equal or higher priority than 7877 * bdflush, otherwise bdflush will deadlock if there are too 7878 * many dirty RAID5 blocks. 7879 */ 7880 7881 allow_signal(SIGKILL); 7882 while (!kthread_should_stop()) { 7883 7884 /* We need to wait INTERRUPTIBLE so that 7885 * we don't add to the load-average. 
7886 * That means we need to be sure no signals are 7887 * pending 7888 */ 7889 if (signal_pending(current)) 7890 flush_signals(current); 7891 7892 wait_event_interruptible_timeout 7893 (thread->wqueue, 7894 test_bit(THREAD_WAKEUP, &thread->flags) 7895 || kthread_should_stop() || kthread_should_park(), 7896 thread->timeout); 7897 7898 clear_bit(THREAD_WAKEUP, &thread->flags); 7899 if (kthread_should_park()) 7900 kthread_parkme(); 7901 if (!kthread_should_stop()) 7902 thread->run(thread); 7903 } 7904 7905 return 0; 7906 } 7907 7908 void md_wakeup_thread(struct md_thread *thread) 7909 { 7910 if (thread) { 7911 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7912 set_bit(THREAD_WAKEUP, &thread->flags); 7913 wake_up(&thread->wqueue); 7914 } 7915 } 7916 EXPORT_SYMBOL(md_wakeup_thread); 7917 7918 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7919 struct mddev *mddev, const char *name) 7920 { 7921 struct md_thread *thread; 7922 7923 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7924 if (!thread) 7925 return NULL; 7926 7927 init_waitqueue_head(&thread->wqueue); 7928 7929 thread->run = run; 7930 thread->mddev = mddev; 7931 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7932 thread->tsk = kthread_run(md_thread, thread, 7933 "%s_%s", 7934 mdname(thread->mddev), 7935 name); 7936 if (IS_ERR(thread->tsk)) { 7937 kfree(thread); 7938 return NULL; 7939 } 7940 return thread; 7941 } 7942 EXPORT_SYMBOL(md_register_thread); 7943 7944 void md_unregister_thread(struct md_thread **threadp) 7945 { 7946 struct md_thread *thread; 7947 7948 /* 7949 * Locking ensures that mddev_unlock does not wake_up a 7950 * non-existent thread 7951 */ 7952 spin_lock(&pers_lock); 7953 thread = *threadp; 7954 if (!thread) { 7955 spin_unlock(&pers_lock); 7956 return; 7957 } 7958 *threadp = NULL; 7959 spin_unlock(&pers_lock); 7960 7961 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7962 kthread_stop(thread->tsk); 7963 kfree(thread); 7964 } 7965 EXPORT_SYMBOL(md_unregister_thread); 7966 7967 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7968 { 7969 if (!rdev || test_bit(Faulty, &rdev->flags)) 7970 return; 7971 7972 if (!mddev->pers || !mddev->pers->error_handler) 7973 return; 7974 mddev->pers->error_handler(mddev, rdev); 7975 7976 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) 7977 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7978 sysfs_notify_dirent_safe(rdev->sysfs_state); 7979 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7980 if (!test_bit(MD_BROKEN, &mddev->flags)) { 7981 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7982 md_wakeup_thread(mddev->thread); 7983 } 7984 if (mddev->event_work.func) 7985 queue_work(md_misc_wq, &mddev->event_work); 7986 md_new_event(); 7987 } 7988 EXPORT_SYMBOL(md_error); 7989 7990 /* seq_file implementation /proc/mdstat */ 7991 7992 static void status_unused(struct seq_file *seq) 7993 { 7994 int i = 0; 7995 struct md_rdev *rdev; 7996 7997 seq_printf(seq, "unused devices: "); 7998 7999 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 8000 i++; 8001 seq_printf(seq, "%pg ", rdev->bdev); 8002 } 8003 if (!i) 8004 seq_printf(seq, "<none>"); 8005 8006 seq_printf(seq, "\n"); 8007 } 8008 8009 static int status_resync(struct seq_file *seq, struct mddev *mddev) 8010 { 8011 sector_t max_sectors, resync, res; 8012 unsigned long dt, db = 0; 8013 sector_t rt, curr_mark_cnt, resync_mark_cnt; 8014 int scale, recovery_active; 8015 unsigned int per_milli; 8016 8017 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8018 
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8019 max_sectors = mddev->resync_max_sectors; 8020 else 8021 max_sectors = mddev->dev_sectors; 8022 8023 resync = mddev->curr_resync; 8024 if (resync < MD_RESYNC_ACTIVE) { 8025 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 8026 /* Still cleaning up */ 8027 resync = max_sectors; 8028 } else if (resync > max_sectors) { 8029 resync = max_sectors; 8030 } else { 8031 resync -= atomic_read(&mddev->recovery_active); 8032 if (resync < MD_RESYNC_ACTIVE) { 8033 /* 8034 * Resync has started, but the subtraction has 8035 * yielded one of the special values. Force it 8036 * to active to ensure the status reports an 8037 * active resync. 8038 */ 8039 resync = MD_RESYNC_ACTIVE; 8040 } 8041 } 8042 8043 if (resync == MD_RESYNC_NONE) { 8044 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 8045 struct md_rdev *rdev; 8046 8047 rdev_for_each(rdev, mddev) 8048 if (rdev->raid_disk >= 0 && 8049 !test_bit(Faulty, &rdev->flags) && 8050 rdev->recovery_offset != MaxSector && 8051 rdev->recovery_offset) { 8052 seq_printf(seq, "\trecover=REMOTE"); 8053 return 1; 8054 } 8055 if (mddev->reshape_position != MaxSector) 8056 seq_printf(seq, "\treshape=REMOTE"); 8057 else 8058 seq_printf(seq, "\tresync=REMOTE"); 8059 return 1; 8060 } 8061 if (mddev->recovery_cp < MaxSector) { 8062 seq_printf(seq, "\tresync=PENDING"); 8063 return 1; 8064 } 8065 return 0; 8066 } 8067 if (resync < MD_RESYNC_ACTIVE) { 8068 seq_printf(seq, "\tresync=DELAYED"); 8069 return 1; 8070 } 8071 8072 WARN_ON(max_sectors == 0); 8073 /* Pick 'scale' such that (resync>>scale)*1000 will fit 8074 * in a sector_t, and (max_sectors>>scale) will fit in a 8075 * u32, as those are the requirements for sector_div. 8076 * Thus 'scale' must be at least 10 8077 */ 8078 scale = 10; 8079 if (sizeof(sector_t) > sizeof(unsigned long)) { 8080 while ( max_sectors/2 > (1ULL<<(scale+32))) 8081 scale++; 8082 } 8083 res = (resync>>scale)*1000; 8084 sector_div(res, (u32)((max_sectors>>scale)+1)); 8085 8086 per_milli = res; 8087 { 8088 int i, x = per_milli/50, y = 20-x; 8089 seq_printf(seq, "["); 8090 for (i = 0; i < x; i++) 8091 seq_printf(seq, "="); 8092 seq_printf(seq, ">"); 8093 for (i = 0; i < y; i++) 8094 seq_printf(seq, "."); 8095 seq_printf(seq, "] "); 8096 } 8097 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 8098 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 8099 "reshape" : 8100 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 8101 "check" : 8102 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 8103 "resync" : "recovery"))), 8104 per_milli/10, per_milli % 10, 8105 (unsigned long long) resync/2, 8106 (unsigned long long) max_sectors/2); 8107 8108 /* 8109 * dt: time from mark until now 8110 * db: blocks written from mark until now 8111 * rt: remaining time 8112 * 8113 * rt is a sector_t, which is always 64bit now. We are keeping 8114 * the original algorithm, but it is not really necessary. 8115 * 8116 * Original algorithm: 8117 * So we divide before multiply in case it is 32bit and close 8118 * to the limit. 8119 * We scale the divisor (db) by 32 to avoid losing precision 8120 * near the end of resync when the number of remaining sectors 8121 * is close to 'db'. 8122 * We then divide rt by 32 after multiplying by db to compensate. 8123 * The '+1' avoids division by zero if db is very small. 
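 * Worked example (illustrative numbers only): with dt = 30 seconds,
 * db = 61440 sectors written since the mark and 10485760 sectors still
 * to go, rt = 10485760 / (61440/32 + 1) = 5458, then rt *= 30 gives
 * 163740 and rt >>= 5 gives 5116 seconds (~85 min), within a fraction
 * of a percent of the exact 10485760 / (61440/30) = 5120 seconds.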
8124 */ 8125 dt = ((jiffies - mddev->resync_mark) / HZ); 8126 if (!dt) dt++; 8127 8128 curr_mark_cnt = mddev->curr_mark_cnt; 8129 recovery_active = atomic_read(&mddev->recovery_active); 8130 resync_mark_cnt = mddev->resync_mark_cnt; 8131 8132 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) 8133 db = curr_mark_cnt - (recovery_active + resync_mark_cnt); 8134 8135 rt = max_sectors - resync; /* number of remaining sectors */ 8136 rt = div64_u64(rt, db/32+1); 8137 rt *= dt; 8138 rt >>= 5; 8139 8140 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 8141 ((unsigned long)rt % 60)/6); 8142 8143 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 8144 return 1; 8145 } 8146 8147 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 8148 { 8149 struct list_head *tmp; 8150 loff_t l = *pos; 8151 struct mddev *mddev; 8152 8153 if (l == 0x10000) { 8154 ++*pos; 8155 return (void *)2; 8156 } 8157 if (l > 0x10000) 8158 return NULL; 8159 if (!l--) 8160 /* header */ 8161 return (void*)1; 8162 8163 spin_lock(&all_mddevs_lock); 8164 list_for_each(tmp,&all_mddevs) 8165 if (!l--) { 8166 mddev = list_entry(tmp, struct mddev, all_mddevs); 8167 mddev_get(mddev); 8168 if (!mddev_get(mddev)) 8169 continue; 8170 spin_unlock(&all_mddevs_lock); 8171 return mddev; 8172 } 8173 spin_unlock(&all_mddevs_lock); 8174 if (!l--) 8175 return (void*)2;/* tail */ 8176 return NULL; 8177 } 8178 8179 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 8180 { 8181 struct list_head *tmp; 8182 struct mddev *next_mddev, *mddev = v; 8183 struct mddev *to_put = NULL; 8184 8185 ++*pos; 8186 if (v == (void*)2) 8187 return NULL; 8188 8189 spin_lock(&all_mddevs_lock); 8190 if (v == (void*)1) { 8191 tmp = all_mddevs.next; 8192 } else { 8193 to_put = mddev; 8194 tmp = mddev->all_mddevs.next; 8195 } 8196 8197 for (;;) { 8198 if (tmp == &all_mddevs) { 8199 next_mddev = (void*)2; 8200 *pos = 0x10000; 8201 break; 8202 } 8203 next_mddev = list_entry(tmp, struct mddev, all_mddevs); 8204 if (mddev_get(next_mddev)) 8205 break; 8206 mddev = next_mddev; 8207 tmp = mddev->all_mddevs.next; 8208 } 8209 spin_unlock(&all_mddevs_lock); 8210 8211 if (to_put) 8212 mddev_put(mddev); 8213 return next_mddev; 8214 8215 } 8216 8217 static void md_seq_stop(struct seq_file *seq, void *v) 8218 { 8219 struct mddev *mddev = v; 8220 8221 if (mddev && v != (void*)1 && v != (void*)2) 8222 mddev_put(mddev); 8223 } 8224 8225 static int md_seq_show(struct seq_file *seq, void *v) 8226 { 8227 struct mddev *mddev = v; 8228 sector_t sectors; 8229 struct md_rdev *rdev; 8230 8231 if (v == (void*)1) { 8232 struct md_personality *pers; 8233 seq_printf(seq, "Personalities : "); 8234 spin_lock(&pers_lock); 8235 list_for_each_entry(pers, &pers_list, list) 8236 seq_printf(seq, "[%s] ", pers->name); 8237 8238 spin_unlock(&pers_lock); 8239 seq_printf(seq, "\n"); 8240 seq->poll_event = atomic_read(&md_event_count); 8241 return 0; 8242 } 8243 if (v == (void*)2) { 8244 status_unused(seq); 8245 return 0; 8246 } 8247 8248 spin_lock(&mddev->lock); 8249 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 8250 seq_printf(seq, "%s : %sactive", mdname(mddev), 8251 mddev->pers ? 
"" : "in"); 8252 if (mddev->pers) { 8253 if (mddev->ro==1) 8254 seq_printf(seq, " (read-only)"); 8255 if (mddev->ro==2) 8256 seq_printf(seq, " (auto-read-only)"); 8257 seq_printf(seq, " %s", mddev->pers->name); 8258 } 8259 8260 sectors = 0; 8261 rcu_read_lock(); 8262 rdev_for_each_rcu(rdev, mddev) { 8263 seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr); 8264 8265 if (test_bit(WriteMostly, &rdev->flags)) 8266 seq_printf(seq, "(W)"); 8267 if (test_bit(Journal, &rdev->flags)) 8268 seq_printf(seq, "(J)"); 8269 if (test_bit(Faulty, &rdev->flags)) { 8270 seq_printf(seq, "(F)"); 8271 continue; 8272 } 8273 if (rdev->raid_disk < 0) 8274 seq_printf(seq, "(S)"); /* spare */ 8275 if (test_bit(Replacement, &rdev->flags)) 8276 seq_printf(seq, "(R)"); 8277 sectors += rdev->sectors; 8278 } 8279 rcu_read_unlock(); 8280 8281 if (!list_empty(&mddev->disks)) { 8282 if (mddev->pers) 8283 seq_printf(seq, "\n %llu blocks", 8284 (unsigned long long) 8285 mddev->array_sectors / 2); 8286 else 8287 seq_printf(seq, "\n %llu blocks", 8288 (unsigned long long)sectors / 2); 8289 } 8290 if (mddev->persistent) { 8291 if (mddev->major_version != 0 || 8292 mddev->minor_version != 90) { 8293 seq_printf(seq," super %d.%d", 8294 mddev->major_version, 8295 mddev->minor_version); 8296 } 8297 } else if (mddev->external) 8298 seq_printf(seq, " super external:%s", 8299 mddev->metadata_type); 8300 else 8301 seq_printf(seq, " super non-persistent"); 8302 8303 if (mddev->pers) { 8304 mddev->pers->status(seq, mddev); 8305 seq_printf(seq, "\n "); 8306 if (mddev->pers->sync_request) { 8307 if (status_resync(seq, mddev)) 8308 seq_printf(seq, "\n "); 8309 } 8310 } else 8311 seq_printf(seq, "\n "); 8312 8313 md_bitmap_status(seq, mddev->bitmap); 8314 8315 seq_printf(seq, "\n"); 8316 } 8317 spin_unlock(&mddev->lock); 8318 8319 return 0; 8320 } 8321 8322 static const struct seq_operations md_seq_ops = { 8323 .start = md_seq_start, 8324 .next = md_seq_next, 8325 .stop = md_seq_stop, 8326 .show = md_seq_show, 8327 }; 8328 8329 static int md_seq_open(struct inode *inode, struct file *file) 8330 { 8331 struct seq_file *seq; 8332 int error; 8333 8334 error = seq_open(file, &md_seq_ops); 8335 if (error) 8336 return error; 8337 8338 seq = file->private_data; 8339 seq->poll_event = atomic_read(&md_event_count); 8340 return error; 8341 } 8342 8343 static int md_unloading; 8344 static __poll_t mdstat_poll(struct file *filp, poll_table *wait) 8345 { 8346 struct seq_file *seq = filp->private_data; 8347 __poll_t mask; 8348 8349 if (md_unloading) 8350 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 8351 poll_wait(filp, &md_event_waiters, wait); 8352 8353 /* always allow read */ 8354 mask = EPOLLIN | EPOLLRDNORM; 8355 8356 if (seq->poll_event != atomic_read(&md_event_count)) 8357 mask |= EPOLLERR | EPOLLPRI; 8358 return mask; 8359 } 8360 8361 static const struct proc_ops mdstat_proc_ops = { 8362 .proc_open = md_seq_open, 8363 .proc_read = seq_read, 8364 .proc_lseek = seq_lseek, 8365 .proc_release = seq_release, 8366 .proc_poll = mdstat_poll, 8367 }; 8368 8369 int register_md_personality(struct md_personality *p) 8370 { 8371 pr_debug("md: %s personality registered for level %d\n", 8372 p->name, p->level); 8373 spin_lock(&pers_lock); 8374 list_add_tail(&p->list, &pers_list); 8375 spin_unlock(&pers_lock); 8376 return 0; 8377 } 8378 EXPORT_SYMBOL(register_md_personality); 8379 8380 int unregister_md_personality(struct md_personality *p) 8381 { 8382 pr_debug("md: %s personality unregistered\n", p->name); 8383 spin_lock(&pers_lock); 8384 
list_del_init(&p->list); 8385 spin_unlock(&pers_lock); 8386 return 0; 8387 } 8388 EXPORT_SYMBOL(unregister_md_personality); 8389 8390 int register_md_cluster_operations(struct md_cluster_operations *ops, 8391 struct module *module) 8392 { 8393 int ret = 0; 8394 spin_lock(&pers_lock); 8395 if (md_cluster_ops != NULL) 8396 ret = -EALREADY; 8397 else { 8398 md_cluster_ops = ops; 8399 md_cluster_mod = module; 8400 } 8401 spin_unlock(&pers_lock); 8402 return ret; 8403 } 8404 EXPORT_SYMBOL(register_md_cluster_operations); 8405 8406 int unregister_md_cluster_operations(void) 8407 { 8408 spin_lock(&pers_lock); 8409 md_cluster_ops = NULL; 8410 spin_unlock(&pers_lock); 8411 return 0; 8412 } 8413 EXPORT_SYMBOL(unregister_md_cluster_operations); 8414 8415 int md_setup_cluster(struct mddev *mddev, int nodes) 8416 { 8417 int ret; 8418 if (!md_cluster_ops) 8419 request_module("md-cluster"); 8420 spin_lock(&pers_lock); 8421 /* ensure module won't be unloaded */ 8422 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 8423 pr_warn("can't find md-cluster module or get its reference.\n"); 8424 spin_unlock(&pers_lock); 8425 return -ENOENT; 8426 } 8427 spin_unlock(&pers_lock); 8428 8429 ret = md_cluster_ops->join(mddev, nodes); 8430 if (!ret) 8431 mddev->safemode_delay = 0; 8432 return ret; 8433 } 8434 8435 void md_cluster_stop(struct mddev *mddev) 8436 { 8437 if (!md_cluster_ops) 8438 return; 8439 md_cluster_ops->leave(mddev); 8440 module_put(md_cluster_mod); 8441 } 8442 8443 static int is_mddev_idle(struct mddev *mddev, int init) 8444 { 8445 struct md_rdev *rdev; 8446 int idle; 8447 int curr_events; 8448 8449 idle = 1; 8450 rcu_read_lock(); 8451 rdev_for_each_rcu(rdev, mddev) { 8452 struct gendisk *disk = rdev->bdev->bd_disk; 8453 curr_events = (int)part_stat_read_accum(disk->part0, sectors) - 8454 atomic_read(&disk->sync_io); 8455 /* sync IO will cause sync_io to increase before the disk_stats 8456 * as sync_io is counted when a request starts, and 8457 * disk_stats is counted when it completes. 8458 * So resync activity will cause curr_events to be smaller than 8459 * when there was no such activity. 8460 * non-sync IO will cause disk_stat to increase without 8461 * increasing sync_io so curr_events will (eventually) 8462 * be larger than it was before. Once it becomes 8463 * substantially larger, the test below will cause 8464 * the array to appear non-idle, and resync will slow 8465 * down. 8466 * If there is a lot of outstanding resync activity when 8467 * we set last_event to curr_events, then all that activity 8468 * completing might cause the array to appear non-idle 8469 * and resync will be slowed down even though there might 8470 * not have been non-resync activity. This will only 8471 * happen once though. 'last_events' will soon reflect 8472 * the state where there is little or no outstanding 8473 * resync requests, and further resync activity will 8474 * always make curr_events less than last_events. 
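 * In practice the "> 64" slack in the test below absorbs that
 * bookkeeping skew between sync_io (counted at submission) and the
 * part_stat accounting (counted at completion); only sustained
 * non-resync IO should push curr_events more than 64 sectors past
 * last_events and make the array look busy.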
8475 * 8476 */ 8477 if (init || curr_events - rdev->last_events > 64) { 8478 rdev->last_events = curr_events; 8479 idle = 0; 8480 } 8481 } 8482 rcu_read_unlock(); 8483 return idle; 8484 } 8485 8486 void md_done_sync(struct mddev *mddev, int blocks, int ok) 8487 { 8488 /* another "blocks" (512byte) blocks have been synced */ 8489 atomic_sub(blocks, &mddev->recovery_active); 8490 wake_up(&mddev->recovery_wait); 8491 if (!ok) { 8492 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8493 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 8494 md_wakeup_thread(mddev->thread); 8495 // stop recovery, signal do_sync .... 8496 } 8497 } 8498 EXPORT_SYMBOL(md_done_sync); 8499 8500 /* md_write_start(mddev, bi) 8501 * If we need to update some array metadata (e.g. 'active' flag 8502 * in superblock) before writing, schedule a superblock update 8503 * and wait for it to complete. 8504 * A return value of 'false' means that the write wasn't recorded 8505 * and cannot proceed as the array is being suspend. 8506 */ 8507 bool md_write_start(struct mddev *mddev, struct bio *bi) 8508 { 8509 int did_change = 0; 8510 8511 if (bio_data_dir(bi) != WRITE) 8512 return true; 8513 8514 BUG_ON(mddev->ro == 1); 8515 if (mddev->ro == 2) { 8516 /* need to switch to read/write */ 8517 mddev->ro = 0; 8518 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8519 md_wakeup_thread(mddev->thread); 8520 md_wakeup_thread(mddev->sync_thread); 8521 did_change = 1; 8522 } 8523 rcu_read_lock(); 8524 percpu_ref_get(&mddev->writes_pending); 8525 smp_mb(); /* Match smp_mb in set_in_sync() */ 8526 if (mddev->safemode == 1) 8527 mddev->safemode = 0; 8528 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ 8529 if (mddev->in_sync || mddev->sync_checkers) { 8530 spin_lock(&mddev->lock); 8531 if (mddev->in_sync) { 8532 mddev->in_sync = 0; 8533 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8534 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8535 md_wakeup_thread(mddev->thread); 8536 did_change = 1; 8537 } 8538 spin_unlock(&mddev->lock); 8539 } 8540 rcu_read_unlock(); 8541 if (did_change) 8542 sysfs_notify_dirent_safe(mddev->sysfs_state); 8543 if (!mddev->has_superblocks) 8544 return true; 8545 wait_event(mddev->sb_wait, 8546 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || 8547 mddev->suspended); 8548 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 8549 percpu_ref_put(&mddev->writes_pending); 8550 return false; 8551 } 8552 return true; 8553 } 8554 EXPORT_SYMBOL(md_write_start); 8555 8556 /* md_write_inc can only be called when md_write_start() has 8557 * already been called at least once of the current request. 8558 * It increments the counter and is useful when a single request 8559 * is split into several parts. Each part causes an increment and 8560 * so needs a matching md_write_end(). 8561 * Unlike md_write_start(), it is safe to call md_write_inc() inside 8562 * a spinlocked region. 
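 *
 * Minimal usage sketch (illustrative only, not any specific personality):
 *
 *	if (!md_write_start(mddev, bio))
 *		return;			(array suspended, write not recorded)
 *	md_write_inc(mddev, split);	(once per extra part the bio is split
 *					 into; safe under a spinlock)
 *	...
 *	md_write_end(mddev);		(once as each part completes, matching
 *					 every start/inc)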
8563 */ 8564 void md_write_inc(struct mddev *mddev, struct bio *bi) 8565 { 8566 if (bio_data_dir(bi) != WRITE) 8567 return; 8568 WARN_ON_ONCE(mddev->in_sync || mddev->ro); 8569 percpu_ref_get(&mddev->writes_pending); 8570 } 8571 EXPORT_SYMBOL(md_write_inc); 8572 8573 void md_write_end(struct mddev *mddev) 8574 { 8575 percpu_ref_put(&mddev->writes_pending); 8576 8577 if (mddev->safemode == 2) 8578 md_wakeup_thread(mddev->thread); 8579 else if (mddev->safemode_delay) 8580 /* The roundup() ensures this only performs locking once 8581 * every ->safemode_delay jiffies 8582 */ 8583 mod_timer(&mddev->safemode_timer, 8584 roundup(jiffies, mddev->safemode_delay) + 8585 mddev->safemode_delay); 8586 } 8587 8588 EXPORT_SYMBOL(md_write_end); 8589 8590 /* This is used by raid0 and raid10 */ 8591 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, 8592 struct bio *bio, sector_t start, sector_t size) 8593 { 8594 struct bio *discard_bio = NULL; 8595 8596 if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 8597 &discard_bio) || !discard_bio) 8598 return; 8599 8600 bio_chain(discard_bio, bio); 8601 bio_clone_blkg_association(discard_bio, bio); 8602 if (mddev->gendisk) 8603 trace_block_bio_remap(discard_bio, 8604 disk_devt(mddev->gendisk), 8605 bio->bi_iter.bi_sector); 8606 submit_bio_noacct(discard_bio); 8607 } 8608 EXPORT_SYMBOL_GPL(md_submit_discard_bio); 8609 8610 int acct_bioset_init(struct mddev *mddev) 8611 { 8612 int err = 0; 8613 8614 if (!bioset_initialized(&mddev->io_acct_set)) 8615 err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE, 8616 offsetof(struct md_io_acct, bio_clone), 0); 8617 return err; 8618 } 8619 EXPORT_SYMBOL_GPL(acct_bioset_init); 8620 8621 void acct_bioset_exit(struct mddev *mddev) 8622 { 8623 bioset_exit(&mddev->io_acct_set); 8624 } 8625 EXPORT_SYMBOL_GPL(acct_bioset_exit); 8626 8627 static void md_end_io_acct(struct bio *bio) 8628 { 8629 struct md_io_acct *md_io_acct = bio->bi_private; 8630 struct bio *orig_bio = md_io_acct->orig_bio; 8631 8632 orig_bio->bi_status = bio->bi_status; 8633 8634 bio_end_io_acct(orig_bio, md_io_acct->start_time); 8635 bio_put(bio); 8636 bio_endio(orig_bio); 8637 } 8638 8639 /* 8640 * Used by personalities that don't already clone the bio and thus can't 8641 * easily add the timestamp to their extended bio structure. 8642 */ 8643 void md_account_bio(struct mddev *mddev, struct bio **bio) 8644 { 8645 struct block_device *bdev = (*bio)->bi_bdev; 8646 struct md_io_acct *md_io_acct; 8647 struct bio *clone; 8648 8649 if (!blk_queue_io_stat(bdev->bd_disk->queue)) 8650 return; 8651 8652 clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set); 8653 md_io_acct = container_of(clone, struct md_io_acct, bio_clone); 8654 md_io_acct->orig_bio = *bio; 8655 md_io_acct->start_time = bio_start_io_acct(*bio); 8656 8657 clone->bi_end_io = md_end_io_acct; 8658 clone->bi_private = md_io_acct; 8659 *bio = clone; 8660 } 8661 EXPORT_SYMBOL_GPL(md_account_bio); 8662 8663 /* md_allow_write(mddev) 8664 * Calling this ensures that the array is marked 'active' so that writes 8665 * may proceed without blocking. It is important to call this before 8666 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8667 * Must be called with mddev_lock held. 
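 *
 * Typical call pattern (a sketch only, assuming the usual personality
 * locking):
 *
 *	mddev_lock(mddev);
 *	md_allow_write(mddev);
 *	p = kzalloc(size, GFP_KERNEL);	(reclaim writing to the now-active
 *					 array cannot stall on a pending
 *					 "mark active" metadata update)
 *	...
 *	mddev_unlock(mddev);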
8668 */ 8669 void md_allow_write(struct mddev *mddev) 8670 { 8671 if (!mddev->pers) 8672 return; 8673 if (mddev->ro) 8674 return; 8675 if (!mddev->pers->sync_request) 8676 return; 8677 8678 spin_lock(&mddev->lock); 8679 if (mddev->in_sync) { 8680 mddev->in_sync = 0; 8681 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8682 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8683 if (mddev->safemode_delay && 8684 mddev->safemode == 0) 8685 mddev->safemode = 1; 8686 spin_unlock(&mddev->lock); 8687 md_update_sb(mddev, 0); 8688 sysfs_notify_dirent_safe(mddev->sysfs_state); 8689 /* wait for the dirty state to be recorded in the metadata */ 8690 wait_event(mddev->sb_wait, 8691 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 8692 } else 8693 spin_unlock(&mddev->lock); 8694 } 8695 EXPORT_SYMBOL_GPL(md_allow_write); 8696 8697 #define SYNC_MARKS 10 8698 #define SYNC_MARK_STEP (3*HZ) 8699 #define UPDATE_FREQUENCY (5*60*HZ) 8700 void md_do_sync(struct md_thread *thread) 8701 { 8702 struct mddev *mddev = thread->mddev; 8703 struct mddev *mddev2; 8704 unsigned int currspeed = 0, window; 8705 sector_t max_sectors,j, io_sectors, recovery_done; 8706 unsigned long mark[SYNC_MARKS]; 8707 unsigned long update_time; 8708 sector_t mark_cnt[SYNC_MARKS]; 8709 int last_mark,m; 8710 sector_t last_check; 8711 int skipped = 0; 8712 struct md_rdev *rdev; 8713 char *desc, *action = NULL; 8714 struct blk_plug plug; 8715 int ret; 8716 8717 /* just incase thread restarts... */ 8718 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8719 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) 8720 return; 8721 if (mddev->ro) {/* never try to sync a read-only array */ 8722 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8723 return; 8724 } 8725 8726 if (mddev_is_clustered(mddev)) { 8727 ret = md_cluster_ops->resync_start(mddev); 8728 if (ret) 8729 goto skip; 8730 8731 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); 8732 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8733 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || 8734 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 8735 && ((unsigned long long)mddev->curr_resync_completed 8736 < (unsigned long long)mddev->resync_max_sectors)) 8737 goto skip; 8738 } 8739 8740 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8741 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 8742 desc = "data-check"; 8743 action = "check"; 8744 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8745 desc = "requested-resync"; 8746 action = "repair"; 8747 } else 8748 desc = "resync"; 8749 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8750 desc = "reshape"; 8751 else 8752 desc = "recovery"; 8753 8754 mddev->last_sync_action = action ?: desc; 8755 8756 /* 8757 * Before starting a resync we must have set curr_resync to 8758 * 2, and then checked that every "conflicting" array has curr_resync 8759 * less than ours. When we find one that is the same or higher 8760 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 8761 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 8762 * This will mean we have to start checking from the beginning again. 
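 * (With the named constants used below: MD_RESYNC_DELAYED is the "2"
 * above, MD_RESYNC_YIELDED is the "1" we drop to when yielding, and
 * curr_resync only becomes MD_RESYNC_ACTIVE, or a real sector count,
 * once the conflict scan succeeds.)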
8763 * 8764 */ 8765 8766 do { 8767 int mddev2_minor = -1; 8768 mddev->curr_resync = MD_RESYNC_DELAYED; 8769 8770 try_again: 8771 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8772 goto skip; 8773 spin_lock(&all_mddevs_lock); 8774 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) { 8775 if (test_bit(MD_DELETED, &mddev2->flags)) 8776 continue; 8777 if (mddev2 == mddev) 8778 continue; 8779 if (!mddev->parallel_resync 8780 && mddev2->curr_resync 8781 && match_mddev_units(mddev, mddev2)) { 8782 DEFINE_WAIT(wq); 8783 if (mddev < mddev2 && 8784 mddev->curr_resync == MD_RESYNC_DELAYED) { 8785 /* arbitrarily yield */ 8786 mddev->curr_resync = MD_RESYNC_YIELDED; 8787 wake_up(&resync_wait); 8788 } 8789 if (mddev > mddev2 && 8790 mddev->curr_resync == MD_RESYNC_YIELDED) 8791 /* no need to wait here, we can wait the next 8792 * time 'round when curr_resync == 2 8793 */ 8794 continue; 8795 /* We need to wait 'interruptible' so as not to 8796 * contribute to the load average, and not to 8797 * be caught by 'softlockup' 8798 */ 8799 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 8800 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8801 mddev2->curr_resync >= mddev->curr_resync) { 8802 if (mddev2_minor != mddev2->md_minor) { 8803 mddev2_minor = mddev2->md_minor; 8804 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n", 8805 desc, mdname(mddev), 8806 mdname(mddev2)); 8807 } 8808 spin_unlock(&all_mddevs_lock); 8809 8810 if (signal_pending(current)) 8811 flush_signals(current); 8812 schedule(); 8813 finish_wait(&resync_wait, &wq); 8814 goto try_again; 8815 } 8816 finish_wait(&resync_wait, &wq); 8817 } 8818 } 8819 spin_unlock(&all_mddevs_lock); 8820 } while (mddev->curr_resync < MD_RESYNC_DELAYED); 8821 8822 j = 0; 8823 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8824 /* resync follows the size requested by the personality, 8825 * which defaults to physical size, but can be virtual size 8826 */ 8827 max_sectors = mddev->resync_max_sectors; 8828 atomic64_set(&mddev->resync_mismatches, 0); 8829 /* we don't use the checkpoint if there's a bitmap */ 8830 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8831 j = mddev->resync_min; 8832 else if (!mddev->bitmap) 8833 j = mddev->recovery_cp; 8834 8835 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 8836 max_sectors = mddev->resync_max_sectors; 8837 /* 8838 * If the original node aborts reshaping then we continue the 8839 * reshaping, so set j again to avoid restart reshape from the 8840 * first beginning 8841 */ 8842 if (mddev_is_clustered(mddev) && 8843 mddev->reshape_position != MaxSector) 8844 j = mddev->reshape_position; 8845 } else { 8846 /* recovery follows the physical size of devices */ 8847 max_sectors = mddev->dev_sectors; 8848 j = MaxSector; 8849 rcu_read_lock(); 8850 rdev_for_each_rcu(rdev, mddev) 8851 if (rdev->raid_disk >= 0 && 8852 !test_bit(Journal, &rdev->flags) && 8853 !test_bit(Faulty, &rdev->flags) && 8854 !test_bit(In_sync, &rdev->flags) && 8855 rdev->recovery_offset < j) 8856 j = rdev->recovery_offset; 8857 rcu_read_unlock(); 8858 8859 /* If there is a bitmap, we need to make sure all 8860 * writes that started before we added a spare 8861 * complete before we start doing a recovery. 8862 * Otherwise the write might complete and (via 8863 * bitmap_endwrite) set a bit in the bitmap after the 8864 * recovery has checked that bit and skipped that 8865 * region. 
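 * For example: recovery samples a bitmap bit as clear and skips that
 * region, then an older in-flight write to the same region completes,
 * sets the bit, and the blocks it covers would never be rebuilt on the
 * new spare.  The quiesce/unquiesce pair below drains such writes first.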
8866 */ 8867 if (mddev->bitmap) { 8868 mddev->pers->quiesce(mddev, 1); 8869 mddev->pers->quiesce(mddev, 0); 8870 } 8871 } 8872 8873 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); 8874 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); 8875 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", 8876 speed_max(mddev), desc); 8877 8878 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 8879 8880 io_sectors = 0; 8881 for (m = 0; m < SYNC_MARKS; m++) { 8882 mark[m] = jiffies; 8883 mark_cnt[m] = io_sectors; 8884 } 8885 last_mark = 0; 8886 mddev->resync_mark = mark[last_mark]; 8887 mddev->resync_mark_cnt = mark_cnt[last_mark]; 8888 8889 /* 8890 * Tune reconstruction: 8891 */ 8892 window = 32 * (PAGE_SIZE / 512); 8893 pr_debug("md: using %dk window, over a total of %lluk.\n", 8894 window/2, (unsigned long long)max_sectors/2); 8895 8896 atomic_set(&mddev->recovery_active, 0); 8897 last_check = 0; 8898 8899 if (j>2) { 8900 pr_debug("md: resuming %s of %s from checkpoint.\n", 8901 desc, mdname(mddev)); 8902 mddev->curr_resync = j; 8903 } else 8904 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ 8905 mddev->curr_resync_completed = j; 8906 sysfs_notify_dirent_safe(mddev->sysfs_completed); 8907 md_new_event(); 8908 update_time = jiffies; 8909 8910 blk_start_plug(&plug); 8911 while (j < max_sectors) { 8912 sector_t sectors; 8913 8914 skipped = 0; 8915 8916 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8917 ((mddev->curr_resync > mddev->curr_resync_completed && 8918 (mddev->curr_resync - mddev->curr_resync_completed) 8919 > (max_sectors >> 4)) || 8920 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 8921 (j - mddev->curr_resync_completed)*2 8922 >= mddev->resync_max - mddev->curr_resync_completed || 8923 mddev->curr_resync_completed > mddev->resync_max 8924 )) { 8925 /* time to update curr_resync_completed */ 8926 wait_event(mddev->recovery_wait, 8927 atomic_read(&mddev->recovery_active) == 0); 8928 mddev->curr_resync_completed = j; 8929 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 8930 j > mddev->recovery_cp) 8931 mddev->recovery_cp = j; 8932 update_time = jiffies; 8933 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8934 sysfs_notify_dirent_safe(mddev->sysfs_completed); 8935 } 8936 8937 while (j >= mddev->resync_max && 8938 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8939 /* As this condition is controlled by user-space, 8940 * we can block indefinitely, so use '_interruptible' 8941 * to avoid triggering warnings. 8942 */ 8943 flush_signals(current); /* just in case */ 8944 wait_event_interruptible(mddev->recovery_wait, 8945 mddev->resync_max > j 8946 || test_bit(MD_RECOVERY_INTR, 8947 &mddev->recovery)); 8948 } 8949 8950 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8951 break; 8952 8953 sectors = mddev->pers->sync_request(mddev, j, &skipped); 8954 if (sectors == 0) { 8955 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8956 break; 8957 } 8958 8959 if (!skipped) { /* actual IO requested */ 8960 io_sectors += sectors; 8961 atomic_add(sectors, &mddev->recovery_active); 8962 } 8963 8964 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8965 break; 8966 8967 j += sectors; 8968 if (j > max_sectors) 8969 /* when skipping, extra large numbers can be returned. 
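 * (a personality might, for instance, skip a whole window and report
 * more sectors than actually remain); clamp j to max_sectors here.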
*/ 8970 j = max_sectors; 8971 if (j > 2) 8972 mddev->curr_resync = j; 8973 mddev->curr_mark_cnt = io_sectors; 8974 if (last_check == 0) 8975 /* this is the earliest that rebuild will be 8976 * visible in /proc/mdstat 8977 */ 8978 md_new_event(); 8979 8980 if (last_check + window > io_sectors || j == max_sectors) 8981 continue; 8982 8983 last_check = io_sectors; 8984 repeat: 8985 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 8986 /* step marks */ 8987 int next = (last_mark+1) % SYNC_MARKS; 8988 8989 mddev->resync_mark = mark[next]; 8990 mddev->resync_mark_cnt = mark_cnt[next]; 8991 mark[next] = jiffies; 8992 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 8993 last_mark = next; 8994 } 8995 8996 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8997 break; 8998 8999 /* 9000 * this loop exits only if either when we are slower than 9001 * the 'hard' speed limit, or the system was IO-idle for 9002 * a jiffy. 9003 * the system might be non-idle CPU-wise, but we only care 9004 * about not overloading the IO subsystem. (things like an 9005 * e2fsck being done on the RAID array should execute fast) 9006 */ 9007 cond_resched(); 9008 9009 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 9010 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 9011 /((jiffies-mddev->resync_mark)/HZ +1) +1; 9012 9013 if (currspeed > speed_min(mddev)) { 9014 if (currspeed > speed_max(mddev)) { 9015 msleep(500); 9016 goto repeat; 9017 } 9018 if (!is_mddev_idle(mddev, 0)) { 9019 /* 9020 * Give other IO more of a chance. 9021 * The faster the devices, the less we wait. 9022 */ 9023 wait_event(mddev->recovery_wait, 9024 !atomic_read(&mddev->recovery_active)); 9025 } 9026 } 9027 } 9028 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, 9029 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 9030 ? 
"interrupted" : "done"); 9031 /* 9032 * this also signals 'finished resyncing' to md_stop 9033 */ 9034 blk_finish_plug(&plug); 9035 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 9036 9037 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9038 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9039 mddev->curr_resync >= MD_RESYNC_ACTIVE) { 9040 mddev->curr_resync_completed = mddev->curr_resync; 9041 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9042 } 9043 mddev->pers->sync_request(mddev, max_sectors, &skipped); 9044 9045 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 9046 mddev->curr_resync >= MD_RESYNC_ACTIVE) { 9047 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 9048 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9049 if (mddev->curr_resync >= mddev->recovery_cp) { 9050 pr_debug("md: checkpointing %s of %s.\n", 9051 desc, mdname(mddev)); 9052 if (test_bit(MD_RECOVERY_ERROR, 9053 &mddev->recovery)) 9054 mddev->recovery_cp = 9055 mddev->curr_resync_completed; 9056 else 9057 mddev->recovery_cp = 9058 mddev->curr_resync; 9059 } 9060 } else 9061 mddev->recovery_cp = MaxSector; 9062 } else { 9063 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9064 mddev->curr_resync = MaxSector; 9065 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9066 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 9067 rcu_read_lock(); 9068 rdev_for_each_rcu(rdev, mddev) 9069 if (rdev->raid_disk >= 0 && 9070 mddev->delta_disks >= 0 && 9071 !test_bit(Journal, &rdev->flags) && 9072 !test_bit(Faulty, &rdev->flags) && 9073 !test_bit(In_sync, &rdev->flags) && 9074 rdev->recovery_offset < mddev->curr_resync) 9075 rdev->recovery_offset = mddev->curr_resync; 9076 rcu_read_unlock(); 9077 } 9078 } 9079 } 9080 skip: 9081 /* set CHANGE_PENDING here since maybe another update is needed, 9082 * so other nodes are informed. It should be harmless for normal 9083 * raid */ 9084 set_mask_bits(&mddev->sb_flags, 0, 9085 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 9086 9087 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9088 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9089 mddev->delta_disks > 0 && 9090 mddev->pers->finish_reshape && 9091 mddev->pers->size && 9092 mddev->queue) { 9093 mddev_lock_nointr(mddev); 9094 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 9095 mddev_unlock(mddev); 9096 if (!mddev_is_clustered(mddev)) 9097 set_capacity_and_notify(mddev->gendisk, 9098 mddev->array_sectors); 9099 } 9100 9101 spin_lock(&mddev->lock); 9102 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9103 /* We completed so min/max setting can be forgotten if used. 
*/ 9104 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9105 mddev->resync_min = 0; 9106 mddev->resync_max = MaxSector; 9107 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9108 mddev->resync_min = mddev->curr_resync_completed; 9109 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 9110 mddev->curr_resync = MD_RESYNC_NONE; 9111 spin_unlock(&mddev->lock); 9112 9113 wake_up(&resync_wait); 9114 md_wakeup_thread(mddev->thread); 9115 return; 9116 } 9117 EXPORT_SYMBOL_GPL(md_do_sync); 9118 9119 static int remove_and_add_spares(struct mddev *mddev, 9120 struct md_rdev *this) 9121 { 9122 struct md_rdev *rdev; 9123 int spares = 0; 9124 int removed = 0; 9125 bool remove_some = false; 9126 9127 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 9128 /* Mustn't remove devices when resync thread is running */ 9129 return 0; 9130 9131 rdev_for_each(rdev, mddev) { 9132 if ((this == NULL || rdev == this) && 9133 rdev->raid_disk >= 0 && 9134 !test_bit(Blocked, &rdev->flags) && 9135 test_bit(Faulty, &rdev->flags) && 9136 atomic_read(&rdev->nr_pending)==0) { 9137 /* Faulty non-Blocked devices with nr_pending == 0 9138 * never get nr_pending incremented, 9139 * never get Faulty cleared, and never get Blocked set. 9140 * So we can synchronize_rcu now rather than once per device 9141 */ 9142 remove_some = true; 9143 set_bit(RemoveSynchronized, &rdev->flags); 9144 } 9145 } 9146 9147 if (remove_some) 9148 synchronize_rcu(); 9149 rdev_for_each(rdev, mddev) { 9150 if ((this == NULL || rdev == this) && 9151 rdev->raid_disk >= 0 && 9152 !test_bit(Blocked, &rdev->flags) && 9153 ((test_bit(RemoveSynchronized, &rdev->flags) || 9154 (!test_bit(In_sync, &rdev->flags) && 9155 !test_bit(Journal, &rdev->flags))) && 9156 atomic_read(&rdev->nr_pending)==0)) { 9157 if (mddev->pers->hot_remove_disk( 9158 mddev, rdev) == 0) { 9159 sysfs_unlink_rdev(mddev, rdev); 9160 rdev->saved_raid_disk = rdev->raid_disk; 9161 rdev->raid_disk = -1; 9162 removed++; 9163 } 9164 } 9165 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags)) 9166 clear_bit(RemoveSynchronized, &rdev->flags); 9167 } 9168 9169 if (removed && mddev->kobj.sd) 9170 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9171 9172 if (this && removed) 9173 goto no_add; 9174 9175 rdev_for_each(rdev, mddev) { 9176 if (this && this != rdev) 9177 continue; 9178 if (test_bit(Candidate, &rdev->flags)) 9179 continue; 9180 if (rdev->raid_disk >= 0 && 9181 !test_bit(In_sync, &rdev->flags) && 9182 !test_bit(Journal, &rdev->flags) && 9183 !test_bit(Faulty, &rdev->flags)) 9184 spares++; 9185 if (rdev->raid_disk >= 0) 9186 continue; 9187 if (test_bit(Faulty, &rdev->flags)) 9188 continue; 9189 if (!test_bit(Journal, &rdev->flags)) { 9190 if (mddev->ro && 9191 ! 
(rdev->saved_raid_disk >= 0 && 9192 !test_bit(Bitmap_sync, &rdev->flags))) 9193 continue; 9194 9195 rdev->recovery_offset = 0; 9196 } 9197 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { 9198 /* failure here is OK */ 9199 sysfs_link_rdev(mddev, rdev); 9200 if (!test_bit(Journal, &rdev->flags)) 9201 spares++; 9202 md_new_event(); 9203 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9204 } 9205 } 9206 no_add: 9207 if (removed) 9208 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9209 return spares; 9210 } 9211 9212 static void md_start_sync(struct work_struct *ws) 9213 { 9214 struct mddev *mddev = container_of(ws, struct mddev, del_work); 9215 9216 mddev->sync_thread = md_register_thread(md_do_sync, 9217 mddev, 9218 "resync"); 9219 if (!mddev->sync_thread) { 9220 pr_warn("%s: could not start resync thread...\n", 9221 mdname(mddev)); 9222 /* leave the spares where they are, it shouldn't hurt */ 9223 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9224 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9225 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9226 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9227 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9228 wake_up(&resync_wait); 9229 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 9230 &mddev->recovery)) 9231 if (mddev->sysfs_action) 9232 sysfs_notify_dirent_safe(mddev->sysfs_action); 9233 } else 9234 md_wakeup_thread(mddev->sync_thread); 9235 sysfs_notify_dirent_safe(mddev->sysfs_action); 9236 md_new_event(); 9237 } 9238 9239 /* 9240 * This routine is regularly called by all per-raid-array threads to 9241 * deal with generic issues like resync and super-block update. 9242 * Raid personalities that don't have a thread (linear/raid0) do not 9243 * need this as they never do any recovery or update the superblock. 9244 * 9245 * It does not do any resync itself, but rather "forks" off other threads 9246 * to do that as needed. 9247 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 9248 * "->recovery" and create a thread at ->sync_thread. 9249 * When the thread finishes it sets MD_RECOVERY_DONE 9250 * and wakeups up this thread which will reap the thread and finish up. 9251 * This thread also removes any faulty devices (with nr_pending == 0). 9252 * 9253 * The overall approach is: 9254 * 1/ if the superblock needs updating, update it. 9255 * 2/ If a recovery thread is running, don't do anything else. 9256 * 3/ If recovery has finished, clean up, possibly marking spares active. 9257 * 4/ If there are any faulty devices, remove them. 9258 * 5/ If array is degraded, try to add spares devices 9259 * 6/ If array has spares or is not in-sync, start a resync thread. 9260 */ 9261 void md_check_recovery(struct mddev *mddev) 9262 { 9263 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { 9264 /* Write superblock - thread that called mddev_suspend() 9265 * holds reconfig_mutex for us. 
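 * Setting MD_UPDATING_SB before re-checking MD_ALLOW_SB_UPDATE (note
 * the barrier in between) is what lets the path that clears the latter
 * wait on sb_wait for the former, so the flag is never pulled away
 * under a half-finished superblock write.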
9266 */ 9267 set_bit(MD_UPDATING_SB, &mddev->flags); 9268 smp_mb__after_atomic(); 9269 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) 9270 md_update_sb(mddev, 0); 9271 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); 9272 wake_up(&mddev->sb_wait); 9273 } 9274 9275 if (mddev->suspended) 9276 return; 9277 9278 if (mddev->bitmap) 9279 md_bitmap_daemon_work(mddev); 9280 9281 if (signal_pending(current)) { 9282 if (mddev->pers->sync_request && !mddev->external) { 9283 pr_debug("md: %s in immediate safe mode\n", 9284 mdname(mddev)); 9285 mddev->safemode = 2; 9286 } 9287 flush_signals(current); 9288 } 9289 9290 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 9291 return; 9292 if ( ! ( 9293 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || 9294 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9295 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 9296 (mddev->external == 0 && mddev->safemode == 1) || 9297 (mddev->safemode == 2 9298 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 9299 )) 9300 return; 9301 9302 if (mddev_trylock(mddev)) { 9303 int spares = 0; 9304 bool try_set_sync = mddev->safemode != 0; 9305 9306 if (!mddev->external && mddev->safemode == 1) 9307 mddev->safemode = 0; 9308 9309 if (mddev->ro) { 9310 struct md_rdev *rdev; 9311 if (!mddev->external && mddev->in_sync) 9312 /* 'Blocked' flag not needed as failed devices 9313 * will be recorded if array switched to read/write. 9314 * Leaving it set will prevent the device 9315 * from being removed. 9316 */ 9317 rdev_for_each(rdev, mddev) 9318 clear_bit(Blocked, &rdev->flags); 9319 /* On a read-only array we can: 9320 * - remove failed devices 9321 * - add already-in_sync devices if the array itself 9322 * is in-sync. 9323 * As we only add devices that are already in-sync, 9324 * we can activate the spares immediately. 9325 */ 9326 remove_and_add_spares(mddev, NULL); 9327 /* There is no thread, but we need to call 9328 * ->spare_active and clear saved_raid_disk 9329 */ 9330 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 9331 md_unregister_thread(&mddev->sync_thread); 9332 md_reap_sync_thread(mddev); 9333 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9334 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9335 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 9336 goto unlock; 9337 } 9338 9339 if (mddev_is_clustered(mddev)) { 9340 struct md_rdev *rdev, *tmp; 9341 /* kick the device if another node issued a 9342 * remove disk. 9343 */ 9344 rdev_for_each_safe(rdev, tmp, mddev) { 9345 if (test_and_clear_bit(ClusterRemove, &rdev->flags) && 9346 rdev->raid_disk < 0) 9347 md_kick_rdev_from_array(rdev); 9348 } 9349 } 9350 9351 if (try_set_sync && !mddev->external && !mddev->in_sync) { 9352 spin_lock(&mddev->lock); 9353 set_in_sync(mddev); 9354 spin_unlock(&mddev->lock); 9355 } 9356 9357 if (mddev->sb_flags) 9358 md_update_sb(mddev, 0); 9359 9360 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 9361 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 9362 /* resync/recovery still happening */ 9363 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9364 goto unlock; 9365 } 9366 if (mddev->sync_thread) { 9367 md_unregister_thread(&mddev->sync_thread); 9368 md_reap_sync_thread(mddev); 9369 goto unlock; 9370 } 9371 /* Set RUNNING before clearing NEEDED to avoid 9372 * any transients in the value of "sync_action". 
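 * (If NEEDED were cleared first, a reader of the sysfs "sync_action"
 * attribute could briefly see no action pending even though one is
 * just about to start.)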
9373 */ 9374 mddev->curr_resync_completed = 0; 9375 spin_lock(&mddev->lock); 9376 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9377 spin_unlock(&mddev->lock); 9378 /* Clear some bits that don't mean anything, but 9379 * might be left set 9380 */ 9381 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 9382 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9383 9384 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9385 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 9386 goto not_running; 9387 /* no recovery is running. 9388 * remove any failed drives, then 9389 * add spares if possible. 9390 * Spares are also removed and re-added, to allow 9391 * the personality to fail the re-add. 9392 */ 9393 9394 if (mddev->reshape_position != MaxSector) { 9395 if (mddev->pers->check_reshape == NULL || 9396 mddev->pers->check_reshape(mddev) != 0) 9397 /* Cannot proceed */ 9398 goto not_running; 9399 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9400 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9401 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 9402 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9403 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9404 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9405 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9406 } else if (mddev->recovery_cp < MaxSector) { 9407 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9408 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9409 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 9410 /* nothing to be done ... */ 9411 goto not_running; 9412 9413 if (mddev->pers->sync_request) { 9414 if (spares) { 9415 /* We are adding a device or devices to an array 9416 * which has the bitmap stored on all devices. 9417 * So make sure all bitmap pages get written 9418 */ 9419 md_bitmap_write_all(mddev->bitmap); 9420 } 9421 INIT_WORK(&mddev->del_work, md_start_sync); 9422 queue_work(md_misc_wq, &mddev->del_work); 9423 goto unlock; 9424 } 9425 not_running: 9426 if (!mddev->sync_thread) { 9427 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9428 wake_up(&resync_wait); 9429 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 9430 &mddev->recovery)) 9431 if (mddev->sysfs_action) 9432 sysfs_notify_dirent_safe(mddev->sysfs_action); 9433 } 9434 unlock: 9435 wake_up(&mddev->sb_wait); 9436 mddev_unlock(mddev); 9437 } 9438 } 9439 EXPORT_SYMBOL(md_check_recovery); 9440 9441 void md_reap_sync_thread(struct mddev *mddev) 9442 { 9443 struct md_rdev *rdev; 9444 sector_t old_dev_sectors = mddev->dev_sectors; 9445 bool is_reshaped = false; 9446 9447 /* sync_thread should be unregistered, collect result */ 9448 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9449 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 9450 mddev->degraded != mddev->raid_disks) { 9451 /* success...*/ 9452 /* activate any spares */ 9453 if (mddev->pers->spare_active(mddev)) { 9454 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9455 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9456 } 9457 } 9458 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9459 mddev->pers->finish_reshape) { 9460 mddev->pers->finish_reshape(mddev); 9461 if (mddev_is_clustered(mddev)) 9462 is_reshaped = true; 9463 } 9464 9465 /* If array is no-longer degraded, then any saved_raid_disk 9466 * information must be scrapped. 
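 * (Every slot is back in sync, so there is no old slot worth
 * remembering for a later re-add; reset saved_raid_disk on each rdev.)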
9467 */ 9468 if (!mddev->degraded) 9469 rdev_for_each(rdev, mddev) 9470 rdev->saved_raid_disk = -1; 9471 9472 md_update_sb(mddev, 1); 9473 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can 9474 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by 9475 * clustered raid */ 9476 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) 9477 md_cluster_ops->resync_finish(mddev); 9478 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9479 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9480 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9481 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9482 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9483 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9484 /* 9485 * We call md_cluster_ops->update_size here because sync_size could 9486 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared, 9487 * so it is time to update size across cluster. 9488 */ 9489 if (mddev_is_clustered(mddev) && is_reshaped 9490 && !test_bit(MD_CLOSING, &mddev->flags)) 9491 md_cluster_ops->update_size(mddev, old_dev_sectors); 9492 wake_up(&resync_wait); 9493 /* flag recovery needed just to double check */ 9494 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9495 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9496 sysfs_notify_dirent_safe(mddev->sysfs_action); 9497 md_new_event(); 9498 if (mddev->event_work.func) 9499 queue_work(md_misc_wq, &mddev->event_work); 9500 } 9501 EXPORT_SYMBOL(md_reap_sync_thread); 9502 9503 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 9504 { 9505 sysfs_notify_dirent_safe(rdev->sysfs_state); 9506 wait_event_timeout(rdev->blocked_wait, 9507 !test_bit(Blocked, &rdev->flags) && 9508 !test_bit(BlockedBadBlocks, &rdev->flags), 9509 msecs_to_jiffies(5000)); 9510 rdev_dec_pending(rdev, mddev); 9511 } 9512 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 9513 9514 void md_finish_reshape(struct mddev *mddev) 9515 { 9516 /* called be personality module when reshape completes. 
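 * Each rdev's usable size is adjusted below by the difference between
 * the old and new data offsets before new_data_offset becomes the
 * permanent offset; e.g. moving data_offset from 2048 to 1024 sectors
 * grows rdev->sectors by 1024.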
*/ 9517 struct md_rdev *rdev; 9518 9519 rdev_for_each(rdev, mddev) { 9520 if (rdev->data_offset > rdev->new_data_offset) 9521 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 9522 else 9523 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 9524 rdev->data_offset = rdev->new_data_offset; 9525 } 9526 } 9527 EXPORT_SYMBOL(md_finish_reshape); 9528 9529 /* Bad block management */ 9530 9531 /* Returns 1 on success, 0 on failure */ 9532 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9533 int is_new) 9534 { 9535 struct mddev *mddev = rdev->mddev; 9536 int rv; 9537 if (is_new) 9538 s += rdev->new_data_offset; 9539 else 9540 s += rdev->data_offset; 9541 rv = badblocks_set(&rdev->badblocks, s, sectors, 0); 9542 if (rv == 0) { 9543 /* Make sure they get written out promptly */ 9544 if (test_bit(ExternalBbl, &rdev->flags)) 9545 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks); 9546 sysfs_notify_dirent_safe(rdev->sysfs_state); 9547 set_mask_bits(&mddev->sb_flags, 0, 9548 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); 9549 md_wakeup_thread(rdev->mddev->thread); 9550 return 1; 9551 } else 9552 return 0; 9553 } 9554 EXPORT_SYMBOL_GPL(rdev_set_badblocks); 9555 9556 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9557 int is_new) 9558 { 9559 int rv; 9560 if (is_new) 9561 s += rdev->new_data_offset; 9562 else 9563 s += rdev->data_offset; 9564 rv = badblocks_clear(&rdev->badblocks, s, sectors); 9565 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) 9566 sysfs_notify_dirent_safe(rdev->sysfs_badblocks); 9567 return rv; 9568 } 9569 EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 9570 9571 static int md_notify_reboot(struct notifier_block *this, 9572 unsigned long code, void *x) 9573 { 9574 struct mddev *mddev, *n; 9575 int need_delay = 0; 9576 9577 spin_lock(&all_mddevs_lock); 9578 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { 9579 if (!mddev_get(mddev)) 9580 continue; 9581 spin_unlock(&all_mddevs_lock); 9582 if (mddev_trylock(mddev)) { 9583 if (mddev->pers) 9584 __md_stop_writes(mddev); 9585 if (mddev->persistent) 9586 mddev->safemode = 2; 9587 mddev_unlock(mddev); 9588 } 9589 need_delay = 1; 9590 mddev_put(mddev); 9591 spin_lock(&all_mddevs_lock); 9592 } 9593 spin_unlock(&all_mddevs_lock); 9594 9595 /* 9596 * certain more exotic SCSI devices are known to be 9597 * volatile wrt too early system reboots. While the 9598 * right place to handle this issue is the given 9599 * driver, we do want to have a safe RAID driver ... 
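 * Hence, if any array was found, we sleep for a second below to give
 * such devices a chance to settle before the reboot continues.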
9600 */ 9601 if (need_delay) 9602 msleep(1000); 9603 9604 return NOTIFY_DONE; 9605 } 9606 9607 static struct notifier_block md_notifier = { 9608 .notifier_call = md_notify_reboot, 9609 .next = NULL, 9610 .priority = INT_MAX, /* before any real devices */ 9611 }; 9612 9613 static void md_geninit(void) 9614 { 9615 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 9616 9617 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops); 9618 } 9619 9620 static int __init md_init(void) 9621 { 9622 int ret = -ENOMEM; 9623 9624 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); 9625 if (!md_wq) 9626 goto err_wq; 9627 9628 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 9629 if (!md_misc_wq) 9630 goto err_misc_wq; 9631 9632 md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0); 9633 if (!md_rdev_misc_wq) 9634 goto err_rdev_misc_wq; 9635 9636 ret = __register_blkdev(MD_MAJOR, "md", md_probe); 9637 if (ret < 0) 9638 goto err_md; 9639 9640 ret = __register_blkdev(0, "mdp", md_probe); 9641 if (ret < 0) 9642 goto err_mdp; 9643 mdp_major = ret; 9644 9645 register_reboot_notifier(&md_notifier); 9646 raid_table_header = register_sysctl_table(raid_root_table); 9647 9648 md_geninit(); 9649 return 0; 9650 9651 err_mdp: 9652 unregister_blkdev(MD_MAJOR, "md"); 9653 err_md: 9654 destroy_workqueue(md_rdev_misc_wq); 9655 err_rdev_misc_wq: 9656 destroy_workqueue(md_misc_wq); 9657 err_misc_wq: 9658 destroy_workqueue(md_wq); 9659 err_wq: 9660 return ret; 9661 } 9662 9663 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) 9664 { 9665 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 9666 struct md_rdev *rdev2, *tmp; 9667 int role, ret; 9668 9669 /* 9670 * If size is changed in another node then we need to 9671 * do resize as well. 9672 */ 9673 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { 9674 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); 9675 if (ret) 9676 pr_info("md-cluster: resize failed\n"); 9677 else 9678 md_bitmap_update_sb(mddev->bitmap); 9679 } 9680 9681 /* Check for change of roles in the active devices */ 9682 rdev_for_each_safe(rdev2, tmp, mddev) { 9683 if (test_bit(Faulty, &rdev2->flags)) 9684 continue; 9685 9686 /* Check if the roles changed */ 9687 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); 9688 9689 if (test_bit(Candidate, &rdev2->flags)) { 9690 if (role == MD_DISK_ROLE_FAULTY) { 9691 pr_info("md: Removing Candidate device %pg because add failed\n", 9692 rdev2->bdev); 9693 md_kick_rdev_from_array(rdev2); 9694 continue; 9695 } 9696 else 9697 clear_bit(Candidate, &rdev2->flags); 9698 } 9699 9700 if (role != rdev2->raid_disk) { 9701 /* 9702 * got activated except reshape is happening. 9703 */ 9704 if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE && 9705 !(le32_to_cpu(sb->feature_map) & 9706 MD_FEATURE_RESHAPE_ACTIVE)) { 9707 rdev2->saved_raid_disk = role; 9708 ret = remove_and_add_spares(mddev, rdev2); 9709 pr_info("Activated spare: %pg\n", 9710 rdev2->bdev); 9711 /* wakeup mddev->thread here, so array could 9712 * perform resync with the new activated disk */ 9713 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9714 md_wakeup_thread(mddev->thread); 9715 } 9716 /* device faulty 9717 * We just want to do the minimum to mark the disk 9718 * as faulty. The recovery is performed by the 9719 * one who initiated the error. 
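 * (We only mirror the role change read from the superblock here; the
 * node that originally failed the device drives the actual recovery.)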
9720 */ 9721 if (role == MD_DISK_ROLE_FAULTY || 9722 role == MD_DISK_ROLE_JOURNAL) { 9723 md_error(mddev, rdev2); 9724 clear_bit(Blocked, &rdev2->flags); 9725 } 9726 } 9727 } 9728 9729 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { 9730 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); 9731 if (ret) 9732 pr_warn("md: updating array disks failed. %d\n", ret); 9733 } 9734 9735 /* 9736 * Since mddev->delta_disks has already updated in update_raid_disks, 9737 * so it is time to check reshape. 9738 */ 9739 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9740 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9741 /* 9742 * reshape is happening in the remote node, we need to 9743 * update reshape_position and call start_reshape. 9744 */ 9745 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 9746 if (mddev->pers->update_reshape_pos) 9747 mddev->pers->update_reshape_pos(mddev); 9748 if (mddev->pers->start_reshape) 9749 mddev->pers->start_reshape(mddev); 9750 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9751 mddev->reshape_position != MaxSector && 9752 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9753 /* reshape is just done in another node. */ 9754 mddev->reshape_position = MaxSector; 9755 if (mddev->pers->update_reshape_pos) 9756 mddev->pers->update_reshape_pos(mddev); 9757 } 9758 9759 /* Finally set the event to be up to date */ 9760 mddev->events = le64_to_cpu(sb->events); 9761 } 9762 9763 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) 9764 { 9765 int err; 9766 struct page *swapout = rdev->sb_page; 9767 struct mdp_superblock_1 *sb; 9768 9769 /* Store the sb page of the rdev in the swapout temporary 9770 * variable in case we err in the future 9771 */ 9772 rdev->sb_page = NULL; 9773 err = alloc_disk_sb(rdev); 9774 if (err == 0) { 9775 ClearPageUptodate(rdev->sb_page); 9776 rdev->sb_loaded = 0; 9777 err = super_types[mddev->major_version]. 9778 load_super(rdev, NULL, mddev->minor_version); 9779 } 9780 if (err < 0) { 9781 pr_warn("%s: %d Could not reload rdev(%d) err: %d. 
Restoring old values\n", 9782 __func__, __LINE__, rdev->desc_nr, err); 9783 if (rdev->sb_page) 9784 put_page(rdev->sb_page); 9785 rdev->sb_page = swapout; 9786 rdev->sb_loaded = 1; 9787 return err; 9788 } 9789 9790 sb = page_address(rdev->sb_page); 9791 /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET 9792 * is not set 9793 */ 9794 9795 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)) 9796 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 9797 9798 /* The other node finished recovery, call spare_active to set 9799 * device In_sync and mddev->degraded 9800 */ 9801 if (rdev->recovery_offset == MaxSector && 9802 !test_bit(In_sync, &rdev->flags) && 9803 mddev->pers->spare_active(mddev)) 9804 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9805 9806 put_page(swapout); 9807 return 0; 9808 } 9809 9810 void md_reload_sb(struct mddev *mddev, int nr) 9811 { 9812 struct md_rdev *rdev = NULL, *iter; 9813 int err; 9814 9815 /* Find the rdev */ 9816 rdev_for_each_rcu(iter, mddev) { 9817 if (iter->desc_nr == nr) { 9818 rdev = iter; 9819 break; 9820 } 9821 } 9822 9823 if (!rdev) { 9824 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); 9825 return; 9826 } 9827 9828 err = read_rdev(mddev, rdev); 9829 if (err < 0) 9830 return; 9831 9832 check_sb_changes(mddev, rdev); 9833 9834 /* Read all rdev's to update recovery_offset */ 9835 rdev_for_each_rcu(rdev, mddev) { 9836 if (!test_bit(Faulty, &rdev->flags)) 9837 read_rdev(mddev, rdev); 9838 } 9839 } 9840 EXPORT_SYMBOL(md_reload_sb); 9841 9842 #ifndef MODULE 9843 9844 /* 9845 * Searches all registered partitions for autorun RAID arrays 9846 * at boot time. 9847 */ 9848 9849 static DEFINE_MUTEX(detected_devices_mutex); 9850 static LIST_HEAD(all_detected_devices); 9851 struct detected_devices_node { 9852 struct list_head list; 9853 dev_t dev; 9854 }; 9855 9856 void md_autodetect_dev(dev_t dev) 9857 { 9858 struct detected_devices_node *node_detected_dev; 9859 9860 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 9861 if (node_detected_dev) { 9862 node_detected_dev->dev = dev; 9863 mutex_lock(&detected_devices_mutex); 9864 list_add_tail(&node_detected_dev->list, &all_detected_devices); 9865 mutex_unlock(&detected_devices_mutex); 9866 } 9867 } 9868 9869 void md_autostart_arrays(int part) 9870 { 9871 struct md_rdev *rdev; 9872 struct detected_devices_node *node_detected_dev; 9873 dev_t dev; 9874 int i_scanned, i_passed; 9875 9876 i_scanned = 0; 9877 i_passed = 0; 9878 9879 pr_info("md: Autodetecting RAID arrays.\n"); 9880 9881 mutex_lock(&detected_devices_mutex); 9882 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 9883 i_scanned++; 9884 node_detected_dev = list_entry(all_detected_devices.next, 9885 struct detected_devices_node, list); 9886 list_del(&node_detected_dev->list); 9887 dev = node_detected_dev->dev; 9888 kfree(node_detected_dev); 9889 mutex_unlock(&detected_devices_mutex); 9890 rdev = md_import_device(dev,0, 90); 9891 mutex_lock(&detected_devices_mutex); 9892 if (IS_ERR(rdev)) 9893 continue; 9894 9895 if (test_bit(Faulty, &rdev->flags)) 9896 continue; 9897 9898 set_bit(AutoDetected, &rdev->flags); 9899 list_add(&rdev->same_set, &pending_raid_disks); 9900 i_passed++; 9901 } 9902 mutex_unlock(&detected_devices_mutex); 9903 9904 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed); 9905 9906 autorun_devices(part); 9907 } 9908 9909 #endif /* !MODULE */ 9910 9911 static __exit void md_exit(void) 9912 { 9913 struct mddev *mddev, *n; 
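	/* 'delay' below doubles on every pass while we wait for pollers of
	 * /proc/mdstat to notice md_unloading and drop out of poll/select */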
9914 int delay = 1; 9915 9916 unregister_blkdev(MD_MAJOR,"md"); 9917 unregister_blkdev(mdp_major, "mdp"); 9918 unregister_reboot_notifier(&md_notifier); 9919 unregister_sysctl_table(raid_table_header); 9920 9921 /* We cannot unload the modules while some process is 9922 * waiting for us in select() or poll() - wake them up 9923 */ 9924 md_unloading = 1; 9925 while (waitqueue_active(&md_event_waiters)) { 9926 /* not safe to leave yet */ 9927 wake_up(&md_event_waiters); 9928 msleep(delay); 9929 delay += delay; 9930 } 9931 remove_proc_entry("mdstat", NULL); 9932 9933 spin_lock(&all_mddevs_lock); 9934 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { 9935 if (!mddev_get(mddev)) 9936 continue; 9937 spin_unlock(&all_mddevs_lock); 9938 export_array(mddev); 9939 mddev->ctime = 0; 9940 mddev->hold_active = 0; 9941 /* 9942 * As the mddev is now fully clear, mddev_put will schedule 9943 * the mddev for destruction by a workqueue, and the 9944 * destroy_workqueue() below will wait for that to complete. 9945 */ 9946 mddev_put(mddev); 9947 spin_lock(&all_mddevs_lock); 9948 } 9949 spin_unlock(&all_mddevs_lock); 9950 9951 destroy_workqueue(md_rdev_misc_wq); 9952 destroy_workqueue(md_misc_wq); 9953 destroy_workqueue(md_wq); 9954 } 9955 9956 subsys_initcall(md_init); 9957 module_exit(md_exit) 9958 9959 static int get_ro(char *buffer, const struct kernel_param *kp) 9960 { 9961 return sprintf(buffer, "%d\n", start_readonly); 9962 } 9963 static int set_ro(const char *val, const struct kernel_param *kp) 9964 { 9965 return kstrtouint(val, 10, (unsigned int *)&start_readonly); 9966 } 9967 9968 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 9969 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 9970 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 9971 module_param(create_on_open, bool, S_IRUSR|S_IWUSR); 9972 9973 MODULE_LICENSE("GPL"); 9974 MODULE_DESCRIPTION("MD RAID framework"); 9975 MODULE_ALIAS("md"); 9976 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 9977
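/*
 * Example (illustrative, not part of the driver): the module parameters
 * declared above can be set at load time, e.g.
 *	modprobe md_mod start_ro=1 start_dirty_degraded=1
 * or, when md is built in, on the kernel command line as "md_mod.start_ro=1".
 */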