// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
                                 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
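
/*
 * Illustrative reading of that rule (an example, not a code path): if
 * an rdev has accumulated 8 corrected read errors and the next error
 * arrives three hours later, the count is first decayed to
 * 8 / 2 / 2 / 2 = 1 before the new error is added, so only a sustained
 * error rate ever approaches the limit of 20.
 */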
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
        if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
                return;

        kvfree(rdev->serial);
        rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
        /* serial_nums equals BARRIER_BUCKETS_NR */
        int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
        struct serial_in_rdev *serial = NULL;

        if (test_bit(CollisionCheck, &rdev->flags))
                return 0;

        serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
                          GFP_KERNEL);
        if (!serial)
                return -ENOMEM;

        for (i = 0; i < serial_nums; i++) {
                struct serial_in_rdev *serial_tmp = &serial[i];

                spin_lock_init(&serial_tmp->serial_lock);
                serial_tmp->serial_rb = RB_ROOT_CACHED;
                init_waitqueue_head(&serial_tmp->serial_io_wait);
        }

        rdev->serial = serial;
        set_bit(CollisionCheck, &rdev->flags);

        return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
        struct md_rdev *rdev;
        int ret = 0;

        rdev_for_each(rdev, mddev) {
                ret = rdev_init_serial(rdev);
                if (ret)
                        break;
        }

        /* Free all resources if the pool does not exist */
        if (ret && !mddev->serial_info_pool)
                rdevs_uninit_serial(mddev);

        return ret;
}

/*
 * rdev needs serialization enabled if it meets both conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
        return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
                rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
                test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resource for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
                              bool is_suspend)
{
        int ret = 0;

        if (rdev && !rdev_need_serial(rdev) &&
            !test_bit(CollisionCheck, &rdev->flags))
                return;

        if (!is_suspend)
                mddev_suspend(mddev);

        if (!rdev)
                ret = rdevs_init_serial(mddev);
        else
                ret = rdev_init_serial(rdev);
        if (ret)
                goto abort;

        if (mddev->serial_info_pool == NULL) {
                /*
                 * already in memalloc noio context by
                 * mddev_suspend()
                 */
                mddev->serial_info_pool =
                        mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
                                                sizeof(struct serial_info));
                if (!mddev->serial_info_pool) {
                        rdevs_uninit_serial(mddev);
                        pr_err("can't alloc memory pool for serialization\n");
                }
        }

abort:
        if (!is_suspend)
                mddev_resume(mddev);
}

/*
 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. when the bitmap is destroyed while the policy is not enabled.
 * 3. when disabling the policy, the pool is destroyed only when no rdev
 *    needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
                               bool is_suspend)
{
        if (rdev && !test_bit(CollisionCheck, &rdev->flags))
                return;

        if (mddev->serial_info_pool) {
                struct md_rdev *temp;
                int num = 0; /* used to track if other rdevs need the pool */

                if (!is_suspend)
                        mddev_suspend(mddev);
                rdev_for_each(temp, mddev) {
                        if (!rdev) {
                                if (!mddev->serialize_policy ||
                                    !rdev_need_serial(temp))
                                        rdev_uninit_serial(temp);
                                else
                                        num++;
                        } else if (temp != rdev &&
                                   test_bit(CollisionCheck, &temp->flags))
                                num++;
                }

                if (rdev)
                        rdev_uninit_serial(rdev);

                if (num)
                        pr_info("The mempool could be used by other devices\n");
                else {
                        mempool_destroy(mddev->serial_info_pool);
                        mddev->serial_info_pool = NULL;
                }
                if (!is_suspend)
                        mddev_resume(mddev);
        }
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
        {
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static struct ctl_table raid_dir_table[] = {
        {
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { }
};

static struct ctl_table raid_root_table[] = {
        {
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it. This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;
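
/*
 * For example (illustrative; the array name is arbitrary), user space
 * can pre-create an array node race-free with:
 *
 *      echo md_test > /sys/module/md_mod/parameters/new_array
 *
 * instead of relying on the racy open of a fresh /dev/mdX node.
 */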
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                            struct mddev *mddev)
{
        if (!mddev || !bioset_initialized(&mddev->bio_set))
                return bio_alloc(gfp_mask, nr_iovecs);

        return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
        if (!mddev || !bioset_initialized(&mddev->sync_set))
                return bio_alloc(GFP_NOIO, 1);

        return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
}

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)                                     \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                _tmp = all_mddevs.next;                                 \
                _mddev = NULL;});                                       \
             ({ if (_tmp != &all_mddevs)                                \
                        mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (_mddev) mddev_put(_mddev);                          \
                _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
                _tmp != &all_mddevs;});                                 \
             ({ spin_lock(&all_mddevs_lock);                            \
                _tmp = _tmp->next;})                                    \
                )
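
/*
 * Usage sketch (illustrative only):
 *
 *      struct mddev *mddev;
 *      struct list_head *tmp;
 *
 *      for_each_mddev(mddev, tmp)
 *              pr_info("md: saw %s\n", mdname(mddev));
 *
 * Note that a "break" out of the loop body leaves a reference held on
 * the current mddev, which the caller must then drop with mddev_put().
 */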
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
        if (mddev->suspended)
                return true;
        if (bio_data_dir(bio) != WRITE)
                return false;
        if (mddev->suspend_lo >= mddev->suspend_hi)
                return false;
        if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
                return false;
        if (bio_end_sector(bio) < mddev->suspend_lo)
                return false;
        return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
        rcu_read_lock();
        if (is_suspended(mddev, bio)) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!is_suspended(mddev, bio))
                                break;
                        rcu_read_unlock();
                        schedule();
                        rcu_read_lock();
                }
                finish_wait(&mddev->sb_wait, &__wait);
        }
        atomic_inc(&mddev->active_io);
        rcu_read_unlock();

        if (!mddev->pers->make_request(mddev, bio)) {
                atomic_dec(&mddev->active_io);
                wake_up(&mddev->sb_wait);
                goto check_suspended;
        }

        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

static blk_qc_t md_submit_bio(struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        const int sgrp = op_stat_group(bio_op(bio));
        struct mddev *mddev = bio->bi_disk->private_data;
        unsigned int sectors;

        if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }

        blk_queue_split(&bio);

        if (mddev == NULL || mddev->pers == NULL) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }
        if (mddev->ro == 1 && unlikely(rw == WRITE)) {
                if (bio_sectors(bio) != 0)
                        bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }

        /*
         * save the sectors now since our bio can
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
        /* bio could be mergeable after passing to the underlying layer */
        bio->bi_opf &= ~REQ_NOMERGE;

        md_handle_request(mddev, bio);

        part_stat_lock();
        part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
        part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
        part_stat_unlock();

        return BLK_QC_T_NONE;
}
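
/*
 * Sketch of the expected suspend/resume pairing (illustrative; the
 * caller must hold reconfig_mutex, e.g. via mddev_lock()):
 *
 *      mddev_suspend(mddev);
 *      ... reconfigure state that must not race with I/O ...
 *      mddev_resume(mddev);
 */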
/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
        WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
        lockdep_assert_held(&mddev->reconfig_mutex);
        if (mddev->suspended++)
                return;
        synchronize_rcu();
        wake_up(&mddev->sb_wait);
        set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
        smp_mb__after_atomic();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);
        clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
        wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

        del_timer_sync(&mddev->safemode_timer);
        /* restrict memory reclaim I/O while the raid array is suspended */
        mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
        /* entered the memalloc scope from mddev_suspend() */
        memalloc_noio_restore(mddev->noio_flag);
        lockdep_assert_held(&mddev->reconfig_mutex);
        if (--mddev->suspended)
                return;
        wake_up(&mddev->sb_wait);
        mddev->pers->quiesce(mddev, 0);

        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        rdev_dec_pending(rdev, mddev);

        if (atomic_dec_and_test(&mddev->flush_pending)) {
                /* The pre-request flush has finished */
                queue_work(md_wq, &mddev->flush_work);
        }
        bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct md_rdev *rdev;

        mddev->start_flush = ktime_get_boottime();
        INIT_WORK(&mddev->flush_work, md_submit_flush_data);
        atomic_set(&mddev->flush_pending, 1);
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* Take two references, one is dropped
                         * when request finishes, one after
                         * we reclaim rcu_read_lock
                         */
                        struct bio *bi;
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bio_set_dev(bi, rdev->bdev);
                        bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(bi);
                        rcu_read_lock();
                        rdev_dec_pending(rdev, mddev);
                }
        rcu_read_unlock();
        if (atomic_dec_and_test(&mddev->flush_pending))
                queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;

        /*
         * must reset flush_bio before calling into md_handle_request to
         * avoid a deadlock: other bios that have passed the suspend check
         * in md_handle_request could wait on this bio, while the
         * md_handle_request call below could in turn wait on those bios
         * because of the same suspend check.
         */
        mddev->last_flush = mddev->start_flush;
        mddev->flush_bio = NULL;
        wake_up(&mddev->sb_wait);

        if (bio->bi_iter.bi_size == 0) {
                /* an empty barrier - all done */
                bio_endio(bio);
        } else {
                bio->bi_opf &= ~REQ_PREFLUSH;
                md_handle_request(mddev, bio);
        }
}

/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
 * being finished in another context.  Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
        ktime_t start = ktime_get_boottime();
        spin_lock_irq(&mddev->lock);
        wait_event_lock_irq(mddev->sb_wait,
                            !mddev->flush_bio ||
                            ktime_after(mddev->last_flush, start),
                            mddev->lock);
        if (!ktime_after(mddev->last_flush, start)) {
                WARN_ON(mddev->flush_bio);
                mddev->flush_bio = bio;
                bio = NULL;
        }
        spin_unlock_irq(&mddev->lock);

        if (!bio) {
                INIT_WORK(&mddev->flush_work, submit_flushes);
                queue_work(md_wq, &mddev->flush_work);
        } else {
                /* flush was performed for some other bio while we waited. */
                if (bio->bi_iter.bi_size == 0)
                        /* an empty barrier - all done */
                        bio_endio(bio);
                else {
                        bio->bi_opf &= ~REQ_PREFLUSH;
                        return false;
                }
        }
        return true;
}
EXPORT_SYMBOL(md_flush_request);
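
/*
 * Typical personality usage (a sketch, mirroring what e.g. raid0 does
 * in its make_request method):
 *
 *      if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *          && md_flush_request(mddev, bio))
 *              return true;
 *
 * i.e. only when md_flush_request() returns false does the personality
 * go on to submit the data portion of the bio itself.
 */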
static inline struct mddev *mddev_get(struct mddev *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
                list_del_init(&mddev->all_mddevs);

                /*
                 * Call queue_work inside the spinlock so that
                 * flush_workqueue() after mddev_find will succeed in waiting
                 * for the work to be done.
                 */
                INIT_WORK(&mddev->del_work, mddev_delayed_delete);
                queue_work(md_misc_wq, &mddev->del_work);
        }
        spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
        kobject_init(&mddev->kobj, &md_ktype);
        mutex_init(&mddev->open_mutex);
        mutex_init(&mddev->reconfig_mutex);
        mutex_init(&mddev->bitmap_info.mutex);
        INIT_LIST_HEAD(&mddev->disks);
        INIT_LIST_HEAD(&mddev->all_mddevs);
        timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
        spin_lock_init(&mddev->lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
        init_waitqueue_head(&mddev->recovery_wait);
        mddev->reshape_position = MaxSector;
        mddev->reshape_backwards = 0;
        mddev->last_sync_action = "none";
        mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
        struct mddev *mddev, *new = NULL;

        if (unit && MAJOR(unit) != MD_MAJOR)
                unit &= ~((1<<MdpMinorShift)-1);

retry:
        spin_lock(&all_mddevs_lock);

        if (unit) {
                list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                        if (mddev->unit == unit) {
                                mddev_get(mddev);
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return mddev;
                        }

                if (new) {
                        list_add(&new->all_mddevs, &all_mddevs);
                        spin_unlock(&all_mddevs_lock);
                        new->hold_active = UNTIL_IOCTL;
                        return new;
                }
        } else if (new) {
                /* find an unused unit number */
                static int next_minor = 512;
                int start = next_minor;
                int is_free = 0;
                int dev = 0;
                while (!is_free) {
                        dev = MKDEV(MD_MAJOR, next_minor);
                        next_minor++;
                        if (next_minor > MINORMASK)
                                next_minor = 0;
                        if (next_minor == start) {
                                /* Oh dear, all in use. */
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return NULL;
                        }

                        is_free = 1;
                        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                                if (mddev->unit == dev) {
                                        is_free = 0;
                                        break;
                                }
                }
                new->unit = dev;
                new->md_minor = MINOR(dev);
                new->hold_active = UNTIL_STOP;
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        mddev_init(new);

        goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
        if (mddev->to_remove) {
                /* These cannot be removed under reconfig_mutex as
                 * an access to the files will try to take reconfig_mutex
                 * while holding the file unremovable, which leads to
                 * a deadlock.
                 * So set sysfs_active while the removal is happening,
                 * and anything else which might set ->to_remove or
                 * otherwise change the sysfs namespace will fail with
                 * -EBUSY if sysfs_active is still set.
                 * We set sysfs_active under reconfig_mutex and elsewhere
                 * test it under the same mutex to ensure its correct value
                 * is seen.
                 */
                struct attribute_group *to_remove = mddev->to_remove;
                mddev->to_remove = NULL;
                mddev->sysfs_active = 1;
                mutex_unlock(&mddev->reconfig_mutex);

                if (mddev->kobj.sd) {
                        if (to_remove != &md_redundancy_group)
                                sysfs_remove_group(&mddev->kobj, to_remove);
                        if (mddev->pers == NULL ||
                            mddev->pers->sync_request == NULL) {
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                                if (mddev->sysfs_action)
                                        sysfs_put(mddev->sysfs_action);
                                mddev->sysfs_action = NULL;
                        }
                }
                mddev->sysfs_active = 0;
        } else
                mutex_unlock(&mddev->reconfig_mutex);

        /* As we've dropped the mutex we need a spinlock to
         * make sure the thread doesn't disappear
         */
        spin_lock(&pers_lock);
        md_wakeup_thread(mddev->thread);
        wake_up(&mddev->sb_wait);
        spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->desc_nr == nr)
                        return rdev;

        return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
        struct md_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
        sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
        return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page)
                return -ENOMEM;
        return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_start = 0;
                rdev->sectors = 0;
        }
        if (rdev->bb_page) {
                put_page(rdev->bb_page);
                rdev->bb_page = NULL;
        }
        badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        if (bio->bi_status) {
                pr_err("md: super_written gets error=%d\n", bio->bi_status);
                md_error(mddev, rdev);
                if (!test_bit(Faulty, &rdev->flags)
                    && (bio->bi_opf & MD_FAILFAST)) {
                        set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
                        set_bit(LastDev, &rdev->flags);
                }
        } else
                clear_bit(LastDev, &rdev->flags);

        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        rdev_dec_pending(rdev, mddev);
        bio_put(bio);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                    sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         */
        struct bio *bio;
        int ff = 0;

        if (!page)
                return;

        if (test_bit(Faulty, &rdev->flags))
                return;

        bio = md_bio_alloc_sync(mddev);

        atomic_inc(&rdev->nr_pending);

        bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;

        if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
            test_bit(FailFast, &rdev->flags) &&
            !test_bit(LastDev, &rdev->flags))
                ff = MD_FAILFAST;
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

        atomic_inc(&mddev->pending_writes);
        submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
        /* wait for all superblock writes that were scheduled to complete */
        wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
        if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
                return -EAGAIN;
        return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
                 struct page *page, int op, int op_flags, bool metadata_op)
{
        struct bio *bio = md_bio_alloc_sync(rdev->mddev);
        int ret;

        if (metadata_op && rdev->meta_bdev)
                bio_set_dev(bio, rdev->meta_bdev);
        else
                bio_set_dev(bio, rdev->bdev);
        bio_set_op_attrs(bio, op, op_flags);
        if (metadata_op)
                bio->bi_iter.bi_sector = sector + rdev->sb_start;
        else if (rdev->mddev->reshape_position != MaxSector &&
                 (rdev->mddev->reshape_backwards ==
                  (sector >= rdev->mddev->reshape_position)))
                bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
        else
                bio->bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(bio, page, size, 0);

        submit_bio_wait(bio);

        ret = !bio->bi_status;
        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
        char b[BDEVNAME_SIZE];

        if (rdev->sb_loaded)
                return 0;

        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        pr_err("md: disabled device %s, could not read superblock.\n",
               bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        return  sb1->set_uuid0 == sb2->set_uuid0 &&
                sb1->set_uuid1 == sb2->set_uuid1 &&
                sb1->set_uuid2 == sb2->set_uuid2 &&
                sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}

static u32 md_csum_fold(u32 csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        return (csum & 0xffff) + (csum >> 16);
}
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
        u64 newcsum = 0;
        u32 *sb32 = (u32*)sb;
        int i;
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;

        for (i = 0; i < MD_SB_BYTES/4 ; i++)
                newcsum += sb32[i];
        csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
        /* This used to use csum_partial, which was wrong for several
         * reasons including that different results are returned on
         * different architectures.  It isn't critical that we get exactly
         * the same return value as before (we always csum_fold before
         * testing, and that removes any differences).  However as we
         * know that csum_partial always returned a 16bit value on
         * alphas, do a fold to maximise conformity to previous behaviour.
         */
        sb->sb_csum = md_csum_fold(disk_csum);
#else
        sb->sb_csum = disk_csum;
#endif
        return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
        char                *name;
        struct module       *owner;
        int                 (*load_super)(struct md_rdev *rdev,
                                          struct md_rdev *refdev,
                                          int minor_version);
        int                 (*validate_super)(struct mddev *mddev,
                                              struct md_rdev *rdev);
        void                (*sync_super)(struct mddev *mddev,
                                          struct md_rdev *rdev);
        unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
                                                sector_t num_sectors);
        int                 (*allow_new_offset)(struct md_rdev *rdev,
                                                unsigned long long new_offset);
};
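
/*
 * The 0.90 and 1.x handlers that follow are collected into a
 * super_types[] table later in this file; an entry looks roughly like
 * this (sketch, assuming the usual upstream layout):
 *
 *      [0] = {
 *              .name             = "0.90.0",
 *              .owner            = THIS_MODULE,
 *              .load_super       = super_90_load,
 *              .validate_super   = super_90_validate,
 *              .sync_super       = super_90_sync,
 *              .rdev_size_change = super_90_rdev_size_change,
 *              .allow_new_offset = super_90_allow_new_offset,
 *      },
 */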
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
        if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
                return 0;
        pr_warn("%s: bitmaps are not supported for %s\n",
                mdname(mddev), mddev->pers->name);
        return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
        bool spare_disk = true;

        /*
         * Calculate the position of the superblock (512byte sectors),
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        rdev->sb_start = calc_dev_sboffset(rdev);

        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret)
                return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                pr_warn("md: invalid raid superblock magic on %s\n", b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version < 90 ||
            sb->minor_version > 91) {
                pr_warn("Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version, b);
                goto abort;
        }

        if (sb->raid_disks <= 0)
                goto abort;

        if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
                pr_warn("md: invalid superblock checksum on %s\n", b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
        rdev->new_data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;
        rdev->badblocks.shift = -1;

        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        /* not spare disk, or LEVEL_MULTIPATH */
        if (sb->level == LEVEL_MULTIPATH ||
            (rdev->desc_nr >= 0 &&
             rdev->desc_nr < MD_SB_DISKS &&
             sb->disks[rdev->desc_nr].state &
             ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
                spare_disk = false;

        if (!refdev) {
                if (!spare_disk)
                        ret = 1;
                else
                        ret = 0;
        } else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = page_address(refdev->sb_page);
                if (!md_uuid_equal(refsb, sb)) {
                        pr_warn("md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!md_sb_equal(refsb, sb)) {
                        pr_warn("md: %s has same UUID but different superblock to %s\n",
                                b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);

                if (!spare_disk && ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
        /* Limit to 4TB as metadata cannot record more than that.
         * (not needed for Linear and RAID0 as metadata doesn't
         * record this size)
         */
        if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
                rdev->sectors = (sector_t)(2ULL << 32) - 2;

        if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;

 abort:
        return ret;
}
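
/*
 * Note on the 4TB cap above: v0.90 metadata records the component size
 * as a 32-bit count of KB, so the largest representable size is
 * 2^32 KB == 4TB == (2ULL << 32) sectors; the code stays just below
 * that at (2ULL << 32) - 2 sectors.
 */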
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = page_address(rdev->sb_page);
        __u64 ev1 = md_event(sb);

        rdev->raid_disk = -1;
        clear_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        clear_bit(Bitmap_sync, &rdev->flags);
        clear_bit(WriteMostly, &rdev->flags);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->external = 0;
                mddev->chunk_sectors = sb->chunk_size >> 9;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->dev_sectors = ((sector_t)sb->size) * 2;
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.space = 0;
                /* bitmap can use 60 K after the 4K superblocks */
                mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
                mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
                mddev->reshape_backwards = 0;

                if (mddev->minor_version >= 91) {
                        mddev->reshape_position = sb->reshape_position;
                        mddev->delta_disks = sb->delta_disks;
                        mddev->new_level = sb->new_level;
                        mddev->new_layout = sb->new_layout;
                        mddev->new_chunk_sectors = sb->new_chunk >> 9;
                        if (mddev->delta_disks < 0)
                                mddev->reshape_backwards = 1;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }
                if (mddev->level == 0)
                        mddev->layout = -1;

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                            sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;

                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_info.file == NULL) {
                        mddev->bitmap_info.offset =
                                mddev->bitmap_info.default_offset;
                        mddev->bitmap_info.space =
                                mddev->bitmap_info.default_space;
                }

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling, except
                 * for spares (which don't need an event count) */
                ++ev1;
                if (sb->disks[rdev->desc_nr].state & (
                        (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
                        if (ev1 < mddev->events)
                                return -EINVAL;
        } else if (mddev->bitmap) {
                /* if adding to array with a bitmap, then we can accept an
                 * older device ... but not too old.
                 */
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
                if (ev1 < mddev->events)
                        set_bit(Bitmap_sync, &rdev->flags);
        } else {
                if (ev1 < mddev->events)
                        /* just a hot-add of a new device, leave raid_disk at -1 */
                        return 0;
        }

        if (mddev->level != LEVEL_MULTIPATH) {
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        set_bit(Faulty, &rdev->flags);
                else if (desc->state & (1<<MD_DISK_SYNC) /* &&
                            desc->raid_disk < mddev->raid_disks */) {
                        set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
                        rdev->saved_raid_disk = desc->raid_disk;
                } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
                        /* active but not in sync implies recovery up to
                         * reshape position.  We don't know exactly where
                         * that is, so set to zero for now */
                        if (mddev->minor_version >= 91) {
                                rdev->recovery_offset = 0;
                                rdev->raid_disk = desc->raid_disk;
                        }
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
                if (desc->state & (1<<MD_DISK_FAILFAST))
                        set_bit(FailFast, &rdev->flags);
        } else /* MULTIPATH are always insync */
                set_bit(In_sync, &rdev->flags);
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
        mdp_super_t *sb;
        struct md_rdev *rdev2;
        int next_spare = mddev->raid_disks;

        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
         * 3/ any empty disks < next_spare become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
        int i;
        int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;

        rdev->sb_size = MD_SB_BYTES;

        sb = page_address(rdev->sb_page);

        memset(sb, 0, sizeof(*sb));

        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
        memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
        memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
        memcpy(&sb->set_uuid3, mddev->uuid+12,4);

        sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
        sb->level = mddev->level;
        sb->size = mddev->dev_sectors / 2;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->md_minor;
        sb->not_persistent = 0;
        sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;

        if (mddev->reshape_position == MaxSector)
                sb->minor_version = 90;
        else {
                sb->minor_version = 91;
                sb->reshape_position = mddev->reshape_position;
                sb->new_level = mddev->new_level;
                sb->delta_disks = mddev->delta_disks;
                sb->new_layout = mddev->new_layout;
                sb->new_chunk = mddev->new_chunk_sectors << 9;
        }
        mddev->minor_version = sb->minor_version;
        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
                sb->cp_events_hi = (mddev->events>>32);
                sb->cp_events_lo = (u32)mddev->events;
                if (mddev->recovery_cp == MaxSector)
                        sb->state = (1<< MD_SB_CLEAN);
        } else
                sb->recovery_cp = 0;

        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_sectors << 9;

        if (mddev->bitmap && mddev->bitmap_info.file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);

        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        rdev_for_each(rdev2, mddev) {
                mdp_disk_t *d;
                int desc_nr;
                int is_active = test_bit(In_sync, &rdev2->flags);

                if (rdev2->raid_disk >= 0 &&
                    sb->minor_version >= 91)
                        /* we have nowhere to store the recovery_offset,
                         * but if it is not below the reshape_position,
                         * we can piggy-back on that.
                         */
                        is_active = 1;
                if (rdev2->raid_disk < 0 ||
                    test_bit(Faulty, &rdev2->flags))
                        is_active = 0;
                if (is_active)
                        desc_nr = rdev2->raid_disk;
                else
                        desc_nr = next_spare++;
                rdev2->desc_nr = desc_nr;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
                if (is_active)
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (test_bit(Faulty, &rdev2->flags))
                        d->state = (1<<MD_DISK_FAULTY);
                else if (is_active) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        if (test_bit(In_sync, &rdev2->flags))
                                d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
                        d->state = 0;
                        spare++;
                        working++;
                }
                if (test_bit(WriteMostly, &rdev2->flags))
                        d->state |= (1<<MD_DISK_WRITEMOSTLY);
                if (test_bit(FailFast, &rdev2->flags))
                        d->state |= (1<<MD_DISK_FAILFAST);
        }
        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
                if (d->state == 0 && d->number == 0) {
                        d->number = i;
                        d->raid_disk = i;
                        d->state = (1<<MD_DISK_REMOVED);
                        d->state |= (1<<MD_DISK_FAULTY);
                        failed++;
                }
        }
        sb->nr_disks = nr_disks;
        sb->active_disks = active;
        sb->working_disks = working;
        sb->failed_disks = failed;
        sb->spare_disks = spare;

        sb->this_disk = sb->disks[rdev->desc_nr];
        sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
        if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
                return 0; /* component must fit device */
        if (rdev->mddev->bitmap_info.offset)
                return 0; /* can't move bitmap */
        rdev->sb_start = calc_dev_sboffset(rdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
                num_sectors = rdev->sb_start;
        /* Limit to 4TB as metadata cannot record more than that.
         * 4TB == 2^32 KB, or 2*2^32 sectors.
         */
        if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
                num_sectors = (sector_t)(2ULL << 32) - 2;
        do {
                md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                               rdev->sb_page);
        } while (md_super_wait(rdev->mddev) < 0);
        return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
        /* non-zero offset changes not possible with v0.90 */
        return new_offset == 0;
}

/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
        __le32 disk_csum;
        u32 csum;
        unsigned long long newcsum;
        int size = 256 + le32_to_cpu(sb->max_dev)*2;
        __le32 *isuper = (__le32*)sb;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        newcsum = 0;
        for (; size >= 4; size -= 4)
                newcsum += le32_to_cpu(*isuper++);

        if (size == 2)
                newcsum += le16_to_cpu(*(__le16*) isuper);

        csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
        return cpu_to_le32(csum);
}

static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
        struct mdp_superblock_1 *sb;
        int ret;
        sector_t sb_start;
        sector_t sectors;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;
        bool spare_disk = true;

        /*
         * Calculate the position of the superblock in 512byte sectors.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
         * 1: At start of device
         * 2: 4K from start of device.
         */
        switch(minor_version) {
        case 0:
                sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
                sb_start -= 8*2;
                sb_start &= ~(sector_t)(4*2-1);
                break;
        case 1:
                sb_start = 0;
                break;
        case 2:
                sb_start = 8;
                break;
        default:
                return -EINVAL;
        }
        rdev->sb_start = sb_start;

        /* superblock is rarely larger than 1K, but it can be larger,
         * and it is safe to read 4k, so we do that
         */
        ret = read_disk_sb(rdev, 4096);
        if (ret) return ret;

        sb = page_address(rdev->sb_page);

        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != rdev->sb_start ||
            (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;

        if (calc_sb_1_csum(sb) != sb->sb_csum) {
                pr_warn("md: invalid superblock checksum on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if (le64_to_cpu(sb->data_size) < 10) {
                pr_warn("md: data_size too small on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if (sb->pad0 ||
            sb->pad3[0] ||
            memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
                /* Some padding is non-zero, might be a new feature */
                return -EINVAL;

        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);
        rdev->new_data_offset = rdev->data_offset;
        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
            (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
                rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
        atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
        if (rdev->sb_size & bmask)
                rdev->sb_size = (rdev->sb_size | bmask) + 1;

        if (minor_version
            && rdev->data_offset < sb_start + (rdev->sb_size/512))
                return -EINVAL;
        if (minor_version
            && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
                return -EINVAL;

        if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = le32_to_cpu(sb->dev_number);

        if (!rdev->bb_page) {
                rdev->bb_page = alloc_page(GFP_KERNEL);
                if (!rdev->bb_page)
                        return -ENOMEM;
        }
        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
            rdev->badblocks.count == 0) {
                /* need to load the bad block list.
                 * Currently we limit it to one page.
                 */
                s32 offset;
                sector_t bb_sector;
                __le64 *bbp;
                int i;
                int sectors = le16_to_cpu(sb->bblog_size);
                if (sectors > (PAGE_SIZE / 512))
                        return -EINVAL;
                offset = le32_to_cpu(sb->bblog_offset);
                if (offset == 0)
                        return -EINVAL;
                bb_sector = (long long)offset;
                if (!sync_page_io(rdev, bb_sector, sectors << 9,
                                  rdev->bb_page, REQ_OP_READ, 0, true))
                        return -EIO;
                bbp = (__le64 *)page_address(rdev->bb_page);
                rdev->badblocks.shift = sb->bblog_shift;
                for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
                        u64 bb = le64_to_cpu(*bbp);
                        int count = bb & (0x3ff);
                        u64 sector = bb >> 10;
                        sector <<= sb->bblog_shift;
                        count <<= sb->bblog_shift;
                        if (bb + 1 == 0)
                                break;
                        if (badblocks_set(&rdev->badblocks, sector, count, 1))
                                return -EINVAL;
                }
        } else if (sb->bblog_offset != 0)
                rdev->badblocks.shift = 0;

        if ((le32_to_cpu(sb->feature_map) &
            (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
                rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
                rdev->ppl.size = le16_to_cpu(sb->ppl.size);
                rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
        }

        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
            sb->level != 0)
                return -EINVAL;

        /* not spare disk, or LEVEL_MULTIPATH */
        if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
            (rdev->desc_nr >= 0 &&
             rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
             (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
              le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
                spare_disk = false;

        if (!refdev) {
                if (!spare_disk)
                        ret = 1;
                else
                        ret = 0;
        } else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

                if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
                        pr_warn("md: %s has strangely different superblock to %s\n",
                                bdevname(rdev->bdev,b),
                                bdevname(refdev->bdev,b2));
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
                ev2 = le64_to_cpu(refsb->events);

                if (!spare_disk && ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        if (minor_version) {
                sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
                sectors -= rdev->data_offset;
        } else
                sectors = rdev->sb_start;
        if (sectors < le64_to_cpu(sb->data_size))
                return -EINVAL;
        rdev->sectors = le64_to_cpu(sb->data_size);
        return ret;
}
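
/*
 * Worked example for the minor_version 0 placement (illustrative
 * numbers): on a 1000003-sector device, sb_start = 1000003 - 16 =
 * 999987, which is then rounded down to the 8-sector boundary 999984.
 * That leaves the superblock 19 sectors (~9.5K) from the end of the
 * device, inside the "at least 8K, but less than 12K" window
 * described above.
 */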
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
        struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
        __u64 ev1 = le64_to_cpu(sb->events);

        rdev->raid_disk = -1;
        clear_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        clear_bit(Bitmap_sync, &rdev->flags);
        clear_bit(WriteMostly, &rdev->flags);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
                mddev->external = 0;
                mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
                mddev->ctime = le64_to_cpu(sb->ctime);
                mddev->utime = le64_to_cpu(sb->utime);
                mddev->level = le32_to_cpu(sb->level);
                mddev->clevel[0] = 0;
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->dev_sectors = le64_to_cpu(sb->size);
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.space = 0;
                /* Default location for bitmap is 1K after superblock
                 * using 3K - total of 4K
                 */
                mddev->bitmap_info.default_offset = 1024 >> 9;
                mddev->bitmap_info.default_space = (4096-1024) >> 9;
                mddev->reshape_backwards = 0;

                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);

                mddev->max_disks =  (4096-256)/2;

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
                    mddev->bitmap_info.file == NULL) {
                        mddev->bitmap_info.offset =
                                (__s32)le32_to_cpu(sb->bitmap_offset);
                        /* Metadata doesn't record how much space is available.
                         * For 1.0, we assume we can use up to the superblock
                         * if before, else to 4K beyond superblock.
                         * For others, assume no change is possible.
                         */
                        if (mddev->minor_version > 0)
                                mddev->bitmap_info.space = 0;
                        else if (mddev->bitmap_info.offset > 0)
                                mddev->bitmap_info.space =
                                        8 - mddev->bitmap_info.offset;
                        else
                                mddev->bitmap_info.space =
                                        -mddev->bitmap_info.offset;
                }

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                        mddev->delta_disks = le32_to_cpu(sb->delta_disks);
                        mddev->new_level = le32_to_cpu(sb->new_level);
                        mddev->new_layout = le32_to_cpu(sb->new_layout);
                        mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
                        if (mddev->delta_disks < 0 ||
                            (mddev->delta_disks == 0 &&
                             (le32_to_cpu(sb->feature_map)
                              & MD_FEATURE_RESHAPE_BACKWARDS)))
                                mddev->reshape_backwards = 1;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }

                if (mddev->level == 0 &&
                    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
                        mddev->layout = -1;

                if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
                        set_bit(MD_HAS_JOURNAL, &mddev->flags);

                if (le32_to_cpu(sb->feature_map) &
                    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
                        if (le32_to_cpu(sb->feature_map) &
                            (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
                                return -EINVAL;
                        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
                            (le32_to_cpu(sb->feature_map) &
                             MD_FEATURE_MULTIPLE_PPLS))
                                return -EINVAL;
                        set_bit(MD_HAS_PPL, &mddev->flags);
                }
        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling, except for
                 * spares (which don't need an event count) */
spares (which don't need an event count) */ 1902 ++ev1; 1903 if (rdev->desc_nr >= 0 && 1904 rdev->desc_nr < le32_to_cpu(sb->max_dev) && 1905 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1906 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) 1907 if (ev1 < mddev->events) 1908 return -EINVAL; 1909 } else if (mddev->bitmap) { 1910 /* If adding to array with a bitmap, then we can accept an 1911 * older device, but not too old. 1912 */ 1913 if (ev1 < mddev->bitmap->events_cleared) 1914 return 0; 1915 if (ev1 < mddev->events) 1916 set_bit(Bitmap_sync, &rdev->flags); 1917 } else { 1918 if (ev1 < mddev->events) 1919 /* just a hot-add of a new device, leave raid_disk at -1 */ 1920 return 0; 1921 } 1922 if (mddev->level != LEVEL_MULTIPATH) { 1923 int role; 1924 if (rdev->desc_nr < 0 || 1925 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { 1926 role = MD_DISK_ROLE_SPARE; 1927 rdev->desc_nr = -1; 1928 } else 1929 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1930 switch(role) { 1931 case MD_DISK_ROLE_SPARE: /* spare */ 1932 break; 1933 case MD_DISK_ROLE_FAULTY: /* faulty */ 1934 set_bit(Faulty, &rdev->flags); 1935 break; 1936 case MD_DISK_ROLE_JOURNAL: /* journal device */ 1937 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { 1938 /* journal device without journal feature */ 1939 pr_warn("md: journal device provided without journal feature, ignoring the device\n"); 1940 return -EINVAL; 1941 } 1942 set_bit(Journal, &rdev->flags); 1943 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1944 rdev->raid_disk = 0; 1945 break; 1946 default: 1947 rdev->saved_raid_disk = role; 1948 if ((le32_to_cpu(sb->feature_map) & 1949 MD_FEATURE_RECOVERY_OFFSET)) { 1950 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 1951 if (!(le32_to_cpu(sb->feature_map) & 1952 MD_FEATURE_RECOVERY_BITMAP)) 1953 rdev->saved_raid_disk = -1; 1954 } else { 1955 /* 1956 * If the array is FROZEN, then the device can't 1957 * be in_sync with rest of array. 1958 */ 1959 if (!test_bit(MD_RECOVERY_FROZEN, 1960 &mddev->recovery)) 1961 set_bit(In_sync, &rdev->flags); 1962 } 1963 rdev->raid_disk = role; 1964 break; 1965 } 1966 if (sb->devflags & WriteMostly1) 1967 set_bit(WriteMostly, &rdev->flags); 1968 if (sb->devflags & FailFast1) 1969 set_bit(FailFast, &rdev->flags); 1970 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) 1971 set_bit(Replacement, &rdev->flags); 1972 } else /* MULTIPATH are always insync */ 1973 set_bit(In_sync, &rdev->flags); 1974 1975 return 0; 1976 } 1977 1978 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 1979 { 1980 struct mdp_superblock_1 *sb; 1981 struct md_rdev *rdev2; 1982 int max_dev, i; 1983 /* make rdev->sb match mddev and rdev data. 
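 * The image is rebuilt from current state on every call: feature_map
 * is zeroed below and each feature bit is re-derived from mddev/rdev
 * state rather than patched incrementally.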
*/ 1984 1985 sb = page_address(rdev->sb_page); 1986 1987 sb->feature_map = 0; 1988 sb->pad0 = 0; 1989 sb->recovery_offset = cpu_to_le64(0); 1990 memset(sb->pad3, 0, sizeof(sb->pad3)); 1991 1992 sb->utime = cpu_to_le64((__u64)mddev->utime); 1993 sb->events = cpu_to_le64(mddev->events); 1994 if (mddev->in_sync) 1995 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1996 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 1997 sb->resync_offset = cpu_to_le64(MaxSector); 1998 else 1999 sb->resync_offset = cpu_to_le64(0); 2000 2001 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 2002 2003 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 2004 sb->size = cpu_to_le64(mddev->dev_sectors); 2005 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 2006 sb->level = cpu_to_le32(mddev->level); 2007 sb->layout = cpu_to_le32(mddev->layout); 2008 if (test_bit(FailFast, &rdev->flags)) 2009 sb->devflags |= FailFast1; 2010 else 2011 sb->devflags &= ~FailFast1; 2012 2013 if (test_bit(WriteMostly, &rdev->flags)) 2014 sb->devflags |= WriteMostly1; 2015 else 2016 sb->devflags &= ~WriteMostly1; 2017 sb->data_offset = cpu_to_le64(rdev->data_offset); 2018 sb->data_size = cpu_to_le64(rdev->sectors); 2019 2020 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 2021 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 2022 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 2023 } 2024 2025 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && 2026 !test_bit(In_sync, &rdev->flags)) { 2027 sb->feature_map |= 2028 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 2029 sb->recovery_offset = 2030 cpu_to_le64(rdev->recovery_offset); 2031 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 2032 sb->feature_map |= 2033 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 2034 } 2035 /* Note: recovery_offset and journal_tail share space */ 2036 if (test_bit(Journal, &rdev->flags)) 2037 sb->journal_tail = cpu_to_le64(rdev->journal_tail); 2038 if (test_bit(Replacement, &rdev->flags)) 2039 sb->feature_map |= 2040 cpu_to_le32(MD_FEATURE_REPLACEMENT); 2041 2042 if (mddev->reshape_position != MaxSector) { 2043 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 2044 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 2045 sb->new_layout = cpu_to_le32(mddev->new_layout); 2046 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 2047 sb->new_level = cpu_to_le32(mddev->new_level); 2048 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 2049 if (mddev->delta_disks == 0 && 2050 mddev->reshape_backwards) 2051 sb->feature_map 2052 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 2053 if (rdev->new_data_offset != rdev->data_offset) { 2054 sb->feature_map 2055 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 2056 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 2057 - rdev->data_offset)); 2058 } 2059 } 2060 2061 if (mddev_is_clustered(mddev)) 2062 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); 2063 2064 if (rdev->badblocks.count == 0) 2065 /* Nothing to do for bad blocks*/ ; 2066 else if (sb->bblog_offset == 0) 2067 /* Cannot record bad blocks on this device */ 2068 md_error(mddev, rdev); 2069 else { 2070 struct badblocks *bb = &rdev->badblocks; 2071 __le64 *bbp = (__le64 *)page_address(rdev->bb_page); 2072 u64 *p = bb->page; 2073 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 2074 if (bb->changed) { 2075 unsigned seq; 2076 2077 retry: 2078 seq = read_seqbegin(&bb->lock); 2079 2080 memset(bbp, 0xff, PAGE_SIZE); 2081 2082 for (i = 0 ; i < bb->count ; i++) { 2083 u64 
internal_bb = p[i]; 2084 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 2085 | BB_LEN(internal_bb)); 2086 bbp[i] = cpu_to_le64(store_bb); 2087 } 2088 bb->changed = 0; 2089 if (read_seqretry(&bb->lock, seq)) 2090 goto retry; 2091 2092 bb->sector = (rdev->sb_start + 2093 (int)le32_to_cpu(sb->bblog_offset)); 2094 bb->size = le16_to_cpu(sb->bblog_size); 2095 } 2096 } 2097 2098 max_dev = 0; 2099 rdev_for_each(rdev2, mddev) 2100 if (rdev2->desc_nr+1 > max_dev) 2101 max_dev = rdev2->desc_nr+1; 2102 2103 if (max_dev > le32_to_cpu(sb->max_dev)) { 2104 int bmask; 2105 sb->max_dev = cpu_to_le32(max_dev); 2106 rdev->sb_size = max_dev * 2 + 256; 2107 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 2108 if (rdev->sb_size & bmask) 2109 rdev->sb_size = (rdev->sb_size | bmask) + 1; 2110 } else 2111 max_dev = le32_to_cpu(sb->max_dev); 2112 2113 for (i=0; i<max_dev;i++) 2114 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2115 2116 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 2117 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 2118 2119 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 2120 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 2121 sb->feature_map |= 2122 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 2123 else 2124 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 2125 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 2126 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 2127 } 2128 2129 rdev_for_each(rdev2, mddev) { 2130 i = rdev2->desc_nr; 2131 if (test_bit(Faulty, &rdev2->flags)) 2132 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 2133 else if (test_bit(In_sync, &rdev2->flags)) 2134 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2135 else if (test_bit(Journal, &rdev2->flags)) 2136 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 2137 else if (rdev2->raid_disk >= 0) 2138 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2139 else 2140 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2141 } 2142 2143 sb->sb_csum = calc_sb_1_csum(sb); 2144 } 2145 2146 static unsigned long long 2147 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 2148 { 2149 struct mdp_superblock_1 *sb; 2150 sector_t max_sectors; 2151 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 2152 return 0; /* component must fit device */ 2153 if (rdev->data_offset != rdev->new_data_offset) 2154 return 0; /* too confusing */ 2155 if (rdev->sb_start < rdev->data_offset) { 2156 /* minor versions 1 and 2; superblock before data */ 2157 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; 2158 max_sectors -= rdev->data_offset; 2159 if (!num_sectors || num_sectors > max_sectors) 2160 num_sectors = max_sectors; 2161 } else if (rdev->mddev->bitmap_info.offset) { 2162 /* minor version 0 with bitmap we can't move */ 2163 return 0; 2164 } else { 2165 /* minor version 0; superblock after data */ 2166 sector_t sb_start; 2167 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; 2168 sb_start &= ~(sector_t)(4*2 - 1); 2169 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 2170 if (!num_sectors || num_sectors > max_sectors) 2171 num_sectors = max_sectors; 2172 rdev->sb_start = sb_start; 2173 } 2174 sb = page_address(rdev->sb_page); 2175 sb->data_size = cpu_to_le64(num_sectors); 2176 sb->super_offset = cpu_to_le64(rdev->sb_start); 2177 sb->sb_csum = calc_sb_1_csum(sb); 2178 do { 2179 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 2180 rdev->sb_page); 2181 } while (md_super_wait(rdev->mddev) < 0); 2182 return num_sectors; 2183 2184 } 2185 2186 static int 2187 
super_1_allow_new_offset(struct md_rdev *rdev, 2188 unsigned long long new_offset) 2189 { 2190 /* All necessary checks on new >= old have been done */ 2191 struct bitmap *bitmap; 2192 if (new_offset >= rdev->data_offset) 2193 return 1; 2194 2195 /* with 1.0 metadata, there is no metadata to tread on 2196 * so we can always move back */ 2197 if (rdev->mddev->minor_version == 0) 2198 return 1; 2199 2200 /* otherwise we must be sure not to step on 2201 * any metadata, so stay: 2202 * 36K beyond start of superblock 2203 * beyond end of badblocks 2204 * beyond write-intent bitmap 2205 */ 2206 if (rdev->sb_start + (32+4)*2 > new_offset) 2207 return 0; 2208 bitmap = rdev->mddev->bitmap; 2209 if (bitmap && !rdev->mddev->bitmap_info.file && 2210 rdev->sb_start + rdev->mddev->bitmap_info.offset + 2211 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) 2212 return 0; 2213 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2214 return 0; 2215 2216 return 1; 2217 } 2218 2219 static struct super_type super_types[] = { 2220 [0] = { 2221 .name = "0.90.0", 2222 .owner = THIS_MODULE, 2223 .load_super = super_90_load, 2224 .validate_super = super_90_validate, 2225 .sync_super = super_90_sync, 2226 .rdev_size_change = super_90_rdev_size_change, 2227 .allow_new_offset = super_90_allow_new_offset, 2228 }, 2229 [1] = { 2230 .name = "md-1", 2231 .owner = THIS_MODULE, 2232 .load_super = super_1_load, 2233 .validate_super = super_1_validate, 2234 .sync_super = super_1_sync, 2235 .rdev_size_change = super_1_rdev_size_change, 2236 .allow_new_offset = super_1_allow_new_offset, 2237 }, 2238 }; 2239 2240 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2241 { 2242 if (mddev->sync_super) { 2243 mddev->sync_super(mddev, rdev); 2244 return; 2245 } 2246 2247 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2248 2249 super_types[mddev->major_version].sync_super(mddev, rdev); 2250 } 2251 2252 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2253 { 2254 struct md_rdev *rdev, *rdev2; 2255 2256 rcu_read_lock(); 2257 rdev_for_each_rcu(rdev, mddev1) { 2258 if (test_bit(Faulty, &rdev->flags) || 2259 test_bit(Journal, &rdev->flags) || 2260 rdev->raid_disk == -1) 2261 continue; 2262 rdev_for_each_rcu(rdev2, mddev2) { 2263 if (test_bit(Faulty, &rdev2->flags) || 2264 test_bit(Journal, &rdev2->flags) || 2265 rdev2->raid_disk == -1) 2266 continue; 2267 if (rdev->bdev->bd_contains == 2268 rdev2->bdev->bd_contains) { 2269 rcu_read_unlock(); 2270 return 1; 2271 } 2272 } 2273 } 2274 rcu_read_unlock(); 2275 return 0; 2276 } 2277 2278 static LIST_HEAD(pending_raid_disks); 2279 2280 /* 2281 * Try to register data integrity profile for an mddev 2282 * 2283 * This is called when an array is started and after a disk has been kicked 2284 * from the array. It only succeeds if all working and active component devices 2285 * are integrity capable with matching profiles. 
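 * Returns 0 on success or when there is nothing to do, and -EINVAL if
 * a member's profile conflicts with the reference profile or if the
 * integrity bio pool cannot be allocated.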
2286 */ 2287 int md_integrity_register(struct mddev *mddev) 2288 { 2289 struct md_rdev *rdev, *reference = NULL; 2290 2291 if (list_empty(&mddev->disks)) 2292 return 0; /* nothing to do */ 2293 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 2294 return 0; /* shouldn't register, or already is */ 2295 rdev_for_each(rdev, mddev) { 2296 /* skip spares and non-functional disks */ 2297 if (test_bit(Faulty, &rdev->flags)) 2298 continue; 2299 if (rdev->raid_disk < 0) 2300 continue; 2301 if (!reference) { 2302 /* Use the first rdev as the reference */ 2303 reference = rdev; 2304 continue; 2305 } 2306 /* does this rdev's profile match the reference profile? */ 2307 if (blk_integrity_compare(reference->bdev->bd_disk, 2308 rdev->bdev->bd_disk) < 0) 2309 return -EINVAL; 2310 } 2311 if (!reference || !bdev_get_integrity(reference->bdev)) 2312 return 0; 2313 /* 2314 * All component devices are integrity capable and have matching 2315 * profiles, register the common profile for the md device. 2316 */ 2317 blk_integrity_register(mddev->gendisk, 2318 bdev_get_integrity(reference->bdev)); 2319 2320 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2321 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { 2322 pr_err("md: failed to create integrity pool for %s\n", 2323 mdname(mddev)); 2324 return -EINVAL; 2325 } 2326 return 0; 2327 } 2328 EXPORT_SYMBOL(md_integrity_register); 2329 2330 /* 2331 * Attempt to add an rdev, but only if it is consistent with the current 2332 * integrity profile 2333 */ 2334 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2335 { 2336 struct blk_integrity *bi_mddev; 2337 char name[BDEVNAME_SIZE]; 2338 2339 if (!mddev->gendisk) 2340 return 0; 2341 2342 bi_mddev = blk_get_integrity(mddev->gendisk); 2343 2344 if (!bi_mddev) /* nothing to do */ 2345 return 0; 2346 2347 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2348 pr_err("%s: incompatible integrity profile for %s\n", 2349 mdname(mddev), bdevname(rdev->bdev, name)); 2350 return -ENXIO; 2351 } 2352 2353 return 0; 2354 } 2355 EXPORT_SYMBOL(md_integrity_add_rdev); 2356 2357 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2358 { 2359 char b[BDEVNAME_SIZE]; 2360 struct kobject *ko; 2361 int err; 2362 2363 /* prevent duplicates */ 2364 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2365 return -EEXIST; 2366 2367 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) && 2368 mddev->pers) 2369 return -EROFS; 2370 2371 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2372 if (!test_bit(Journal, &rdev->flags) && 2373 rdev->sectors && 2374 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2375 if (mddev->pers) { 2376 /* Cannot change size, so fail 2377 * If mddev->level <= 0, then we don't care 2378 * about aligning sizes (e.g. linear) 2379 */ 2380 if (mddev->level > 0) 2381 return -ENOSPC; 2382 } else 2383 mddev->dev_sectors = rdev->sectors; 2384 } 2385 2386 /* Verify rdev->desc_nr is unique. 
2387 * If it is -1, assign a free number, else 2388 * check number is not in use 2389 */ 2390 rcu_read_lock(); 2391 if (rdev->desc_nr < 0) { 2392 int choice = 0; 2393 if (mddev->pers) 2394 choice = mddev->raid_disks; 2395 while (md_find_rdev_nr_rcu(mddev, choice)) 2396 choice++; 2397 rdev->desc_nr = choice; 2398 } else { 2399 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2400 rcu_read_unlock(); 2401 return -EBUSY; 2402 } 2403 } 2404 rcu_read_unlock(); 2405 if (!test_bit(Journal, &rdev->flags) && 2406 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2407 pr_warn("md: %s: array is limited to %d devices\n", 2408 mdname(mddev), mddev->max_disks); 2409 return -EBUSY; 2410 } 2411 bdevname(rdev->bdev,b); 2412 strreplace(b, '/', '!'); 2413 2414 rdev->mddev = mddev; 2415 pr_debug("md: bind<%s>\n", b); 2416 2417 if (mddev->raid_disks) 2418 mddev_create_serial_pool(mddev, rdev, false); 2419 2420 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2421 goto fail; 2422 2423 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2424 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2425 /* failure here is OK */; 2426 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2427 2428 list_add_rcu(&rdev->same_set, &mddev->disks); 2429 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2430 2431 /* May as well allow recovery to be retried once */ 2432 mddev->recovery_disabled++; 2433 2434 return 0; 2435 2436 fail: 2437 pr_warn("md: failed to register dev-%s for %s\n", 2438 b, mdname(mddev)); 2439 return err; 2440 } 2441 2442 static void rdev_delayed_delete(struct work_struct *ws) 2443 { 2444 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2445 kobject_del(&rdev->kobj); 2446 kobject_put(&rdev->kobj); 2447 } 2448 2449 static void unbind_rdev_from_array(struct md_rdev *rdev) 2450 { 2451 char b[BDEVNAME_SIZE]; 2452 2453 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2454 list_del_rcu(&rdev->same_set); 2455 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2456 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 2457 rdev->mddev = NULL; 2458 sysfs_remove_link(&rdev->kobj, "block"); 2459 sysfs_put(rdev->sysfs_state); 2460 rdev->sysfs_state = NULL; 2461 rdev->badblocks.count = 0; 2462 /* We need to delay this, otherwise we can deadlock when 2463 * writing to 'remove' to "dev/state". We also need 2464 * to delay it due to rcu usage. 2465 */ 2466 synchronize_rcu(); 2467 INIT_WORK(&rdev->del_work, rdev_delayed_delete); 2468 kobject_get(&rdev->kobj); 2469 queue_work(md_rdev_misc_wq, &rdev->del_work); 2470 } 2471 2472 /* 2473 * prevent the device from being mounted, repartitioned or 2474 * otherwise reused by a RAID array (or any other kernel 2475 * subsystem), by bd_claiming the device. 2476 */ 2477 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2478 { 2479 int err = 0; 2480 struct block_device *bdev; 2481 2482 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2483 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2484 if (IS_ERR(bdev)) { 2485 pr_warn("md: could not open device unknown-block(%u,%u).\n", 2486 MAJOR(dev), MINOR(dev)); 2487 return PTR_ERR(bdev); 2488 } 2489 rdev->bdev = bdev; 2490 return err; 2491 } 2492 2493 static void unlock_rdev(struct md_rdev *rdev) 2494 { 2495 struct block_device *bdev = rdev->bdev; 2496 rdev->bdev = NULL; 2497 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2498 } 2499 2500 void md_autodetect_dev(dev_t dev); 2501 2502 static void export_rdev(struct md_rdev *rdev) 2503 { 2504 char b[BDEVNAME_SIZE]; 2505 2506 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b)); 2507 md_rdev_clear(rdev); 2508 #ifndef MODULE 2509 if (test_bit(AutoDetected, &rdev->flags)) 2510 md_autodetect_dev(rdev->bdev->bd_dev); 2511 #endif 2512 unlock_rdev(rdev); 2513 kobject_put(&rdev->kobj); 2514 } 2515 2516 void md_kick_rdev_from_array(struct md_rdev *rdev) 2517 { 2518 unbind_rdev_from_array(rdev); 2519 export_rdev(rdev); 2520 } 2521 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2522 2523 static void export_array(struct mddev *mddev) 2524 { 2525 struct md_rdev *rdev; 2526 2527 while (!list_empty(&mddev->disks)) { 2528 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2529 same_set); 2530 md_kick_rdev_from_array(rdev); 2531 } 2532 mddev->raid_disks = 0; 2533 mddev->major_version = 0; 2534 } 2535 2536 static bool set_in_sync(struct mddev *mddev) 2537 { 2538 lockdep_assert_held(&mddev->lock); 2539 if (!mddev->in_sync) { 2540 mddev->sync_checkers++; 2541 spin_unlock(&mddev->lock); 2542 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2543 spin_lock(&mddev->lock); 2544 if (!mddev->in_sync && 2545 percpu_ref_is_zero(&mddev->writes_pending)) { 2546 mddev->in_sync = 1; 2547 /* 2548 * Ensure ->in_sync is visible before we clear 2549 * ->sync_checkers. 2550 */ 2551 smp_mb(); 2552 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2553 sysfs_notify_dirent_safe(mddev->sysfs_state); 2554 } 2555 if (--mddev->sync_checkers == 0) 2556 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2557 } 2558 if (mddev->safemode == 1) 2559 mddev->safemode = 0; 2560 return mddev->in_sync; 2561 } 2562 2563 static void sync_sbs(struct mddev *mddev, int nospares) 2564 { 2565 /* Update each superblock (in-memory image), but 2566 * if we are allowed to, skip spares which already 2567 * have the right event counter, or have one earlier 2568 * (which would mean they aren't being marked as dirty 2569 * with the rest of the array) 2570 */ 2571 struct md_rdev *rdev; 2572 rdev_for_each(rdev, mddev) { 2573 if (rdev->sb_events == mddev->events || 2574 (nospares && 2575 rdev->raid_disk < 0 && 2576 rdev->sb_events+1 == mddev->events)) { 2577 /* Don't update this superblock */ 2578 rdev->sb_loaded = 2; 2579 } else { 2580 sync_super(mddev, rdev); 2581 rdev->sb_loaded = 1; 2582 } 2583 } 2584 } 2585 2586 static bool does_sb_need_changing(struct mddev *mddev) 2587 { 2588 struct md_rdev *rdev; 2589 struct mdp_superblock_1 *sb; 2590 int role; 2591 2592 /* Find a good rdev */ 2593 rdev_for_each(rdev, mddev) 2594 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) 2595 break; 2596 2597 /* No good device found. */ 2598 if (!rdev) 2599 return false; 2600 2601 sb = page_address(rdev->sb_page); 2602 /* Check if a device has become faulty or a spare become active */ 2603 rdev_for_each(rdev, mddev) { 2604 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2605 /* Device activated? 
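	 * (the on-disk role still reads MD_DISK_ROLE_SPARE (0xffff) while
	 * the in-core rdev now has a raid_disk assigned and is not Faulty)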
	 */
		if (role == 0xffff && rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags))
			return true;
		/* Device turned faulty? */
		if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
			return true;
	}

	/* Check if any mddev parameters have changed */
	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
	    (mddev->layout != le32_to_cpu(sb->layout)) ||
	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
		return true;

	return false;
}

void md_update_sb(struct mddev *mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;
	int ret = -1;

	if (mddev->ro) {
		if (force_change)
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		return;
	}

repeat:
	if (mddev_is_clustered(mddev)) {
		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
			force_change = 1;
		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
			nospares = 1;
		ret = md_cluster_ops->metadata_update_start(mddev);
		/* Has someone else updated the sb? */
		if (!does_sb_need_changing(mddev)) {
			if (ret == 0)
				md_cluster_ops->metadata_update_cancel(mddev);
			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
					 BIT(MD_SB_CHANGE_DEVS) |
					 BIT(MD_SB_CHANGE_CLEAN));
			return;
		}
	}

	/*
	 * First make sure individual recovery_offsets are correct.
	 * curr_resync_completed can only be used during recovery.
	 * During reshape/resync it might use array-addresses rather
	 * than device addresses.
	 */
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    !test_bit(Journal, &rdev->flags) &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}

	if (!mddev->persistent) {
		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		if (!mddev->external) {
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			rdev_for_each(rdev, mddev) {
				if (rdev->badblocks.changed) {
					rdev->badblocks.changed = 0;
					ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock(&mddev->lock);

	mddev->utime = ktime_get_real_seconds();

	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
		force_change = 1;
	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can then go back to sleep.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
		mddev->events--;
		mddev->can_decrease_events = 0;
	} else {
		/* otherwise we have to go forward and ... */
		mddev->events++;
		mddev->can_decrease_events = nospares;
	}

	/*
	 * This 64-bit counter should never wrap.
	 * Either we are in around ~1 trillion A.C., assuming
	 * 1 reboot per second, or we have a bug...
	 */
	WARN_ON(mddev->events == 0);

	rdev_for_each(rdev, mddev) {
		if (rdev->badblocks.changed)
			any_badblocks_changed++;
		if (test_bit(Faulty, &rdev->flags))
			set_bit(FaultRecorded, &rdev->flags);
	}

	sync_sbs(mddev, nospares);
	spin_unlock(&mddev->lock);

	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
		 mdname(mddev), mddev->in_sync);

	if (mddev->queue)
		blk_add_trace_msg(mddev->queue, "md md_update_sb");
rewrite:
	md_bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];

		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */

		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev, rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			pr_debug("md: (write) %s's sb offset: %llu\n",
				 bdevname(rdev->bdev, b),
				 (unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;
			if (rdev->badblocks.size) {
				md_super_write(mddev, rdev,
					       rdev->badblocks.sector,
					       rdev->badblocks.size << 9,
					       rdev->bb_page);
				rdev->badblocks.size = 0;
			}

		} else
			pr_debug("md: %s (skipping faulty)\n",
				 bdevname(rdev->bdev, b));

		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock...
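			 * (the members are just different paths to the same
			 * storage, so a single write reaches them all)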
			 */
			break;
	}
	if (md_super_wait(mddev) < 0)
		goto rewrite;
	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */

	if (mddev_is_clustered(mddev) && ret == 0)
		md_cluster_ops->metadata_update_finish(mddev);

	if (mddev->in_sync != sync_req ||
	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
		/* have to write it out again */
		goto repeat;
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");

	rdev_for_each(rdev, mddev) {
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
			ack_all_badblocks(&rdev->badblocks);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}
EXPORT_SYMBOL(md_update_sb);

static int add_bound_rdev(struct md_rdev *rdev)
{
	struct mddev *mddev = rdev->mddev;
	int err = 0;
	bool add_journal = test_bit(Journal, &rdev->flags);

	if (!mddev->pers->hot_remove_disk || add_journal) {
		/* If the personality has no hot_remove_disk, disks are only
		 * ever added for geometry changes and should be activated
		 * immediately.
		 */
		super_types[mddev->major_version].
			validate_super(mddev, rdev);
		if (add_journal)
			mddev_suspend(mddev);
		err = mddev->pers->hot_add_disk(mddev, rdev);
		if (add_journal)
			mddev_resume(mddev);
		if (err) {
			md_kick_rdev_from_array(rdev);
			return err;
		}
	}
	sysfs_notify_dirent_safe(rdev->sysfs_state);

	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_new_event(mddev);
	md_wakeup_thread(mddev->thread);
	return 0;
}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case.  For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.
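	 * For example, "idle\n" written via sysfs matches "idle", while
	 * "idle2" does not.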
They must either be the same, or cmd can 2860 * have a trailing newline 2861 */ 2862 while (*cmd && *str && *cmd == *str) { 2863 cmd++; 2864 str++; 2865 } 2866 if (*cmd == '\n') 2867 cmd++; 2868 if (*str || *cmd) 2869 return 0; 2870 return 1; 2871 } 2872 2873 struct rdev_sysfs_entry { 2874 struct attribute attr; 2875 ssize_t (*show)(struct md_rdev *, char *); 2876 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2877 }; 2878 2879 static ssize_t 2880 state_show(struct md_rdev *rdev, char *page) 2881 { 2882 char *sep = ","; 2883 size_t len = 0; 2884 unsigned long flags = READ_ONCE(rdev->flags); 2885 2886 if (test_bit(Faulty, &flags) || 2887 (!test_bit(ExternalBbl, &flags) && 2888 rdev->badblocks.unacked_exist)) 2889 len += sprintf(page+len, "faulty%s", sep); 2890 if (test_bit(In_sync, &flags)) 2891 len += sprintf(page+len, "in_sync%s", sep); 2892 if (test_bit(Journal, &flags)) 2893 len += sprintf(page+len, "journal%s", sep); 2894 if (test_bit(WriteMostly, &flags)) 2895 len += sprintf(page+len, "write_mostly%s", sep); 2896 if (test_bit(Blocked, &flags) || 2897 (rdev->badblocks.unacked_exist 2898 && !test_bit(Faulty, &flags))) 2899 len += sprintf(page+len, "blocked%s", sep); 2900 if (!test_bit(Faulty, &flags) && 2901 !test_bit(Journal, &flags) && 2902 !test_bit(In_sync, &flags)) 2903 len += sprintf(page+len, "spare%s", sep); 2904 if (test_bit(WriteErrorSeen, &flags)) 2905 len += sprintf(page+len, "write_error%s", sep); 2906 if (test_bit(WantReplacement, &flags)) 2907 len += sprintf(page+len, "want_replacement%s", sep); 2908 if (test_bit(Replacement, &flags)) 2909 len += sprintf(page+len, "replacement%s", sep); 2910 if (test_bit(ExternalBbl, &flags)) 2911 len += sprintf(page+len, "external_bbl%s", sep); 2912 if (test_bit(FailFast, &flags)) 2913 len += sprintf(page+len, "failfast%s", sep); 2914 2915 if (len) 2916 len -= strlen(sep); 2917 2918 return len+sprintf(page+len, "\n"); 2919 } 2920 2921 static ssize_t 2922 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2923 { 2924 /* can write 2925 * faulty - simulates an error 2926 * remove - disconnects the device 2927 * writemostly - sets write_mostly 2928 * -writemostly - clears write_mostly 2929 * blocked - sets the Blocked flags 2930 * -blocked - clears the Blocked and possibly simulates an error 2931 * insync - sets Insync providing device isn't active 2932 * -insync - clear Insync for a device with a slot assigned, 2933 * so that it gets rebuilt based on bitmap 2934 * write_error - sets WriteErrorSeen 2935 * -write_error - clears WriteErrorSeen 2936 * {,-}failfast - set/clear FailFast 2937 */ 2938 int err = -EINVAL; 2939 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2940 md_error(rdev->mddev, rdev); 2941 if (test_bit(Faulty, &rdev->flags)) 2942 err = 0; 2943 else 2944 err = -EBUSY; 2945 } else if (cmd_match(buf, "remove")) { 2946 if (rdev->mddev->pers) { 2947 clear_bit(Blocked, &rdev->flags); 2948 remove_and_add_spares(rdev->mddev, rdev); 2949 } 2950 if (rdev->raid_disk >= 0) 2951 err = -EBUSY; 2952 else { 2953 struct mddev *mddev = rdev->mddev; 2954 err = 0; 2955 if (mddev_is_clustered(mddev)) 2956 err = md_cluster_ops->remove_disk(mddev, rdev); 2957 2958 if (err == 0) { 2959 md_kick_rdev_from_array(rdev); 2960 if (mddev->pers) { 2961 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2962 md_wakeup_thread(mddev->thread); 2963 } 2964 md_new_event(mddev); 2965 } 2966 } 2967 } else if (cmd_match(buf, "writemostly")) { 2968 set_bit(WriteMostly, &rdev->flags); 2969 mddev_create_serial_pool(rdev->mddev, rdev, false); 2970 
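		/* when rdev_need_serial() holds, the pool provides the
		 * per-rdev serial trees used to order write-behind I/O on
		 * multi-queue devices */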
err = 0; 2971 } else if (cmd_match(buf, "-writemostly")) { 2972 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 2973 clear_bit(WriteMostly, &rdev->flags); 2974 err = 0; 2975 } else if (cmd_match(buf, "blocked")) { 2976 set_bit(Blocked, &rdev->flags); 2977 err = 0; 2978 } else if (cmd_match(buf, "-blocked")) { 2979 if (!test_bit(Faulty, &rdev->flags) && 2980 !test_bit(ExternalBbl, &rdev->flags) && 2981 rdev->badblocks.unacked_exist) { 2982 /* metadata handler doesn't understand badblocks, 2983 * so we need to fail the device 2984 */ 2985 md_error(rdev->mddev, rdev); 2986 } 2987 clear_bit(Blocked, &rdev->flags); 2988 clear_bit(BlockedBadBlocks, &rdev->flags); 2989 wake_up(&rdev->blocked_wait); 2990 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2991 md_wakeup_thread(rdev->mddev->thread); 2992 2993 err = 0; 2994 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2995 set_bit(In_sync, &rdev->flags); 2996 err = 0; 2997 } else if (cmd_match(buf, "failfast")) { 2998 set_bit(FailFast, &rdev->flags); 2999 err = 0; 3000 } else if (cmd_match(buf, "-failfast")) { 3001 clear_bit(FailFast, &rdev->flags); 3002 err = 0; 3003 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 3004 !test_bit(Journal, &rdev->flags)) { 3005 if (rdev->mddev->pers == NULL) { 3006 clear_bit(In_sync, &rdev->flags); 3007 rdev->saved_raid_disk = rdev->raid_disk; 3008 rdev->raid_disk = -1; 3009 err = 0; 3010 } 3011 } else if (cmd_match(buf, "write_error")) { 3012 set_bit(WriteErrorSeen, &rdev->flags); 3013 err = 0; 3014 } else if (cmd_match(buf, "-write_error")) { 3015 clear_bit(WriteErrorSeen, &rdev->flags); 3016 err = 0; 3017 } else if (cmd_match(buf, "want_replacement")) { 3018 /* Any non-spare device that is not a replacement can 3019 * become want_replacement at any time, but we then need to 3020 * check if recovery is needed. 3021 */ 3022 if (rdev->raid_disk >= 0 && 3023 !test_bit(Journal, &rdev->flags) && 3024 !test_bit(Replacement, &rdev->flags)) 3025 set_bit(WantReplacement, &rdev->flags); 3026 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3027 md_wakeup_thread(rdev->mddev->thread); 3028 err = 0; 3029 } else if (cmd_match(buf, "-want_replacement")) { 3030 /* Clearing 'want_replacement' is always allowed. 3031 * Once replacements starts it is too late though. 3032 */ 3033 err = 0; 3034 clear_bit(WantReplacement, &rdev->flags); 3035 } else if (cmd_match(buf, "replacement")) { 3036 /* Can only set a device as a replacement when array has not 3037 * yet been started. Once running, replacement is automatic 3038 * from spares, or by assigning 'slot'. 3039 */ 3040 if (rdev->mddev->pers) 3041 err = -EBUSY; 3042 else { 3043 set_bit(Replacement, &rdev->flags); 3044 err = 0; 3045 } 3046 } else if (cmd_match(buf, "-replacement")) { 3047 /* Similarly, can only clear Replacement before start */ 3048 if (rdev->mddev->pers) 3049 err = -EBUSY; 3050 else { 3051 clear_bit(Replacement, &rdev->flags); 3052 err = 0; 3053 } 3054 } else if (cmd_match(buf, "re-add")) { 3055 if (!rdev->mddev->pers) 3056 err = -EINVAL; 3057 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 3058 rdev->saved_raid_disk >= 0) { 3059 /* clear_bit is performed _after_ all the devices 3060 * have their local Faulty bit cleared. 
If any writes 3061 * happen in the meantime in the local node, they 3062 * will land in the local bitmap, which will be synced 3063 * by this node eventually 3064 */ 3065 if (!mddev_is_clustered(rdev->mddev) || 3066 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 3067 clear_bit(Faulty, &rdev->flags); 3068 err = add_bound_rdev(rdev); 3069 } 3070 } else 3071 err = -EBUSY; 3072 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 3073 set_bit(ExternalBbl, &rdev->flags); 3074 rdev->badblocks.shift = 0; 3075 err = 0; 3076 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 3077 clear_bit(ExternalBbl, &rdev->flags); 3078 err = 0; 3079 } 3080 if (!err) 3081 sysfs_notify_dirent_safe(rdev->sysfs_state); 3082 return err ? err : len; 3083 } 3084 static struct rdev_sysfs_entry rdev_state = 3085 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 3086 3087 static ssize_t 3088 errors_show(struct md_rdev *rdev, char *page) 3089 { 3090 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 3091 } 3092 3093 static ssize_t 3094 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 3095 { 3096 unsigned int n; 3097 int rv; 3098 3099 rv = kstrtouint(buf, 10, &n); 3100 if (rv < 0) 3101 return rv; 3102 atomic_set(&rdev->corrected_errors, n); 3103 return len; 3104 } 3105 static struct rdev_sysfs_entry rdev_errors = 3106 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 3107 3108 static ssize_t 3109 slot_show(struct md_rdev *rdev, char *page) 3110 { 3111 if (test_bit(Journal, &rdev->flags)) 3112 return sprintf(page, "journal\n"); 3113 else if (rdev->raid_disk < 0) 3114 return sprintf(page, "none\n"); 3115 else 3116 return sprintf(page, "%d\n", rdev->raid_disk); 3117 } 3118 3119 static ssize_t 3120 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 3121 { 3122 int slot; 3123 int err; 3124 3125 if (test_bit(Journal, &rdev->flags)) 3126 return -EBUSY; 3127 if (strncmp(buf, "none", 4)==0) 3128 slot = -1; 3129 else { 3130 err = kstrtouint(buf, 10, (unsigned int *)&slot); 3131 if (err < 0) 3132 return err; 3133 } 3134 if (rdev->mddev->pers && slot == -1) { 3135 /* Setting 'slot' on an active array requires also 3136 * updating the 'rd%d' link, and communicating 3137 * with the personality with ->hot_*_disk. 3138 * For now we only support removing 3139 * failed/spare devices. This normally happens automatically, 3140 * but not when the metadata is externally managed. 3141 */ 3142 if (rdev->raid_disk == -1) 3143 return -EEXIST; 3144 /* personality does all needed checks */ 3145 if (rdev->mddev->pers->hot_remove_disk == NULL) 3146 return -EINVAL; 3147 clear_bit(Blocked, &rdev->flags); 3148 remove_and_add_spares(rdev->mddev, rdev); 3149 if (rdev->raid_disk >= 0) 3150 return -EBUSY; 3151 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3152 md_wakeup_thread(rdev->mddev->thread); 3153 } else if (rdev->mddev->pers) { 3154 /* Activating a spare .. or possibly reactivating 3155 * if we ever get bitmaps working here. 
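	 * (e.g. writing "2" to this attribute on a running array hot-adds
	 * the device into raid slot 2 via ->hot_add_disk below)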
3156 */ 3157 int err; 3158 3159 if (rdev->raid_disk != -1) 3160 return -EBUSY; 3161 3162 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 3163 return -EBUSY; 3164 3165 if (rdev->mddev->pers->hot_add_disk == NULL) 3166 return -EINVAL; 3167 3168 if (slot >= rdev->mddev->raid_disks && 3169 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3170 return -ENOSPC; 3171 3172 rdev->raid_disk = slot; 3173 if (test_bit(In_sync, &rdev->flags)) 3174 rdev->saved_raid_disk = slot; 3175 else 3176 rdev->saved_raid_disk = -1; 3177 clear_bit(In_sync, &rdev->flags); 3178 clear_bit(Bitmap_sync, &rdev->flags); 3179 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); 3180 if (err) { 3181 rdev->raid_disk = -1; 3182 return err; 3183 } else 3184 sysfs_notify_dirent_safe(rdev->sysfs_state); 3185 if (sysfs_link_rdev(rdev->mddev, rdev)) 3186 /* failure here is OK */; 3187 /* don't wakeup anyone, leave that to userspace. */ 3188 } else { 3189 if (slot >= rdev->mddev->raid_disks && 3190 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3191 return -ENOSPC; 3192 rdev->raid_disk = slot; 3193 /* assume it is working */ 3194 clear_bit(Faulty, &rdev->flags); 3195 clear_bit(WriteMostly, &rdev->flags); 3196 set_bit(In_sync, &rdev->flags); 3197 sysfs_notify_dirent_safe(rdev->sysfs_state); 3198 } 3199 return len; 3200 } 3201 3202 static struct rdev_sysfs_entry rdev_slot = 3203 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3204 3205 static ssize_t 3206 offset_show(struct md_rdev *rdev, char *page) 3207 { 3208 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3209 } 3210 3211 static ssize_t 3212 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3213 { 3214 unsigned long long offset; 3215 if (kstrtoull(buf, 10, &offset) < 0) 3216 return -EINVAL; 3217 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3218 return -EBUSY; 3219 if (rdev->sectors && rdev->mddev->external) 3220 /* Must set offset before size, so overlap checks 3221 * can be sane */ 3222 return -EBUSY; 3223 rdev->data_offset = offset; 3224 rdev->new_data_offset = offset; 3225 return len; 3226 } 3227 3228 static struct rdev_sysfs_entry rdev_offset = 3229 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3230 3231 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3232 { 3233 return sprintf(page, "%llu\n", 3234 (unsigned long long)rdev->new_data_offset); 3235 } 3236 3237 static ssize_t new_offset_store(struct md_rdev *rdev, 3238 const char *buf, size_t len) 3239 { 3240 unsigned long long new_offset; 3241 struct mddev *mddev = rdev->mddev; 3242 3243 if (kstrtoull(buf, 10, &new_offset) < 0) 3244 return -EINVAL; 3245 3246 if (mddev->sync_thread || 3247 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 3248 return -EBUSY; 3249 if (new_offset == rdev->data_offset) 3250 /* reset is always permitted */ 3251 ; 3252 else if (new_offset > rdev->data_offset) { 3253 /* must not push array size beyond rdev_sectors */ 3254 if (new_offset - rdev->data_offset 3255 + mddev->dev_sectors > rdev->sectors) 3256 return -E2BIG; 3257 } 3258 /* Metadata worries about other space details. */ 3259 3260 /* decreasing the offset is inconsistent with a backwards 3261 * reshape. 3262 */ 3263 if (new_offset < rdev->data_offset && 3264 mddev->reshape_backwards) 3265 return -EINVAL; 3266 /* Increasing offset is inconsistent with forwards 3267 * reshape. reshape_direction should be set to 3268 * 'backwards' first. 
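	 * (the assignments below keep this consistent: raising new_offset
	 * sets mddev->reshape_backwards, lowering it clears the flag)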
3269 */ 3270 if (new_offset > rdev->data_offset && 3271 !mddev->reshape_backwards) 3272 return -EINVAL; 3273 3274 if (mddev->pers && mddev->persistent && 3275 !super_types[mddev->major_version] 3276 .allow_new_offset(rdev, new_offset)) 3277 return -E2BIG; 3278 rdev->new_data_offset = new_offset; 3279 if (new_offset > rdev->data_offset) 3280 mddev->reshape_backwards = 1; 3281 else if (new_offset < rdev->data_offset) 3282 mddev->reshape_backwards = 0; 3283 3284 return len; 3285 } 3286 static struct rdev_sysfs_entry rdev_new_offset = 3287 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 3288 3289 static ssize_t 3290 rdev_size_show(struct md_rdev *rdev, char *page) 3291 { 3292 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 3293 } 3294 3295 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 3296 { 3297 /* check if two start/length pairs overlap */ 3298 if (s1+l1 <= s2) 3299 return 0; 3300 if (s2+l2 <= s1) 3301 return 0; 3302 return 1; 3303 } 3304 3305 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 3306 { 3307 unsigned long long blocks; 3308 sector_t new; 3309 3310 if (kstrtoull(buf, 10, &blocks) < 0) 3311 return -EINVAL; 3312 3313 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 3314 return -EINVAL; /* sector conversion overflow */ 3315 3316 new = blocks * 2; 3317 if (new != blocks * 2) 3318 return -EINVAL; /* unsigned long long to sector_t overflow */ 3319 3320 *sectors = new; 3321 return 0; 3322 } 3323 3324 static ssize_t 3325 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3326 { 3327 struct mddev *my_mddev = rdev->mddev; 3328 sector_t oldsectors = rdev->sectors; 3329 sector_t sectors; 3330 3331 if (test_bit(Journal, &rdev->flags)) 3332 return -EBUSY; 3333 if (strict_blocks_to_sectors(buf, §ors) < 0) 3334 return -EINVAL; 3335 if (rdev->data_offset != rdev->new_data_offset) 3336 return -EINVAL; /* too confusing */ 3337 if (my_mddev->pers && rdev->raid_disk >= 0) { 3338 if (my_mddev->persistent) { 3339 sectors = super_types[my_mddev->major_version]. 3340 rdev_size_change(rdev, sectors); 3341 if (!sectors) 3342 return -EBUSY; 3343 } else if (!sectors) 3344 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 3345 rdev->data_offset; 3346 if (!my_mddev->pers->resize) 3347 /* Cannot change size for RAID0 or Linear etc */ 3348 return -EINVAL; 3349 } 3350 if (sectors < my_mddev->dev_sectors) 3351 return -EINVAL; /* component must fit device */ 3352 3353 rdev->sectors = sectors; 3354 if (sectors > oldsectors && my_mddev->external) { 3355 /* Need to check that all other rdevs with the same 3356 * ->bdev do not overlap. 'rcu' is sufficient to walk 3357 * the rdev lists safely. 3358 * This check does not provide a hard guarantee, it 3359 * just helps avoid dangerous mistakes. 3360 */ 3361 struct mddev *mddev; 3362 int overlap = 0; 3363 struct list_head *tmp; 3364 3365 rcu_read_lock(); 3366 for_each_mddev(mddev, tmp) { 3367 struct md_rdev *rdev2; 3368 3369 rdev_for_each(rdev2, mddev) 3370 if (rdev->bdev == rdev2->bdev && 3371 rdev != rdev2 && 3372 overlaps(rdev->data_offset, rdev->sectors, 3373 rdev2->data_offset, 3374 rdev2->sectors)) { 3375 overlap = 1; 3376 break; 3377 } 3378 if (overlap) { 3379 mddev_put(mddev); 3380 break; 3381 } 3382 } 3383 rcu_read_unlock(); 3384 if (overlap) { 3385 /* Someone else could have slipped in a size 3386 * change here, but doing so is just silly. 
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);

static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
	unsigned long long recovery_start = rdev->recovery_offset;

	if (test_bit(In_sync, &rdev->flags) ||
	    recovery_start == MaxSector)
		return sprintf(page, "none\n");

	return sprintf(page, "%llu\n", recovery_start);
}

static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long recovery_start;

	if (cmd_match(buf, "none"))
		recovery_start = MaxSector;
	else if (kstrtoull(buf, 10, &recovery_start))
		return -EINVAL;

	if (rdev->mddev->pers &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	rdev->recovery_offset = recovery_start;
	if (recovery_start == MaxSector)
		set_bit(In_sync, &rdev->flags);
	else
		clear_bit(In_sync, &rdev->flags);
	return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);

/* sysfs access to the bad-blocks list.
 * We present two files.
 * 'bad_blocks' lists sector numbers and lengths of ranges that
 * are recorded as bad.  The list is truncated to fit within
 * the one-page limit of sysfs.
 * Writing "sector length" to this file adds an acknowledged
 * bad block.
 * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
 * been acknowledged.  Writing to this file adds bad blocks
 * without acknowledging them.  This is largely for testing.
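 * (a hypothetical session: "echo 1000 8 > bad_blocks" records an
 * acknowledged 8-sector bad range starting at sector 1000)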
3446 */ 3447 static ssize_t bb_show(struct md_rdev *rdev, char *page) 3448 { 3449 return badblocks_show(&rdev->badblocks, page, 0); 3450 } 3451 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 3452 { 3453 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 3454 /* Maybe that ack was all we needed */ 3455 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 3456 wake_up(&rdev->blocked_wait); 3457 return rv; 3458 } 3459 static struct rdev_sysfs_entry rdev_bad_blocks = 3460 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 3461 3462 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 3463 { 3464 return badblocks_show(&rdev->badblocks, page, 1); 3465 } 3466 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 3467 { 3468 return badblocks_store(&rdev->badblocks, page, len, 1); 3469 } 3470 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 3471 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 3472 3473 static ssize_t 3474 ppl_sector_show(struct md_rdev *rdev, char *page) 3475 { 3476 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector); 3477 } 3478 3479 static ssize_t 3480 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len) 3481 { 3482 unsigned long long sector; 3483 3484 if (kstrtoull(buf, 10, §or) < 0) 3485 return -EINVAL; 3486 if (sector != (sector_t)sector) 3487 return -EINVAL; 3488 3489 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3490 rdev->raid_disk >= 0) 3491 return -EBUSY; 3492 3493 if (rdev->mddev->persistent) { 3494 if (rdev->mddev->major_version == 0) 3495 return -EINVAL; 3496 if ((sector > rdev->sb_start && 3497 sector - rdev->sb_start > S16_MAX) || 3498 (sector < rdev->sb_start && 3499 rdev->sb_start - sector > -S16_MIN)) 3500 return -EINVAL; 3501 rdev->ppl.offset = sector - rdev->sb_start; 3502 } else if (!rdev->mddev->external) { 3503 return -EBUSY; 3504 } 3505 rdev->ppl.sector = sector; 3506 return len; 3507 } 3508 3509 static struct rdev_sysfs_entry rdev_ppl_sector = 3510 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store); 3511 3512 static ssize_t 3513 ppl_size_show(struct md_rdev *rdev, char *page) 3514 { 3515 return sprintf(page, "%u\n", rdev->ppl.size); 3516 } 3517 3518 static ssize_t 3519 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3520 { 3521 unsigned int size; 3522 3523 if (kstrtouint(buf, 10, &size) < 0) 3524 return -EINVAL; 3525 3526 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3527 rdev->raid_disk >= 0) 3528 return -EBUSY; 3529 3530 if (rdev->mddev->persistent) { 3531 if (rdev->mddev->major_version == 0) 3532 return -EINVAL; 3533 if (size > U16_MAX) 3534 return -EINVAL; 3535 } else if (!rdev->mddev->external) { 3536 return -EBUSY; 3537 } 3538 rdev->ppl.size = size; 3539 return len; 3540 } 3541 3542 static struct rdev_sysfs_entry rdev_ppl_size = 3543 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store); 3544 3545 static struct attribute *rdev_default_attrs[] = { 3546 &rdev_state.attr, 3547 &rdev_errors.attr, 3548 &rdev_slot.attr, 3549 &rdev_offset.attr, 3550 &rdev_new_offset.attr, 3551 &rdev_size.attr, 3552 &rdev_recovery_start.attr, 3553 &rdev_bad_blocks.attr, 3554 &rdev_unack_bad_blocks.attr, 3555 &rdev_ppl_sector.attr, 3556 &rdev_ppl_size.attr, 3557 NULL, 3558 }; 3559 static ssize_t 3560 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3561 { 3562 struct rdev_sysfs_entry *entry = container_of(attr, struct 
rdev_sysfs_entry, attr); 3563 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3564 3565 if (!entry->show) 3566 return -EIO; 3567 if (!rdev->mddev) 3568 return -ENODEV; 3569 return entry->show(rdev, page); 3570 } 3571 3572 static ssize_t 3573 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3574 const char *page, size_t length) 3575 { 3576 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3577 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3578 ssize_t rv; 3579 struct mddev *mddev = rdev->mddev; 3580 3581 if (!entry->store) 3582 return -EIO; 3583 if (!capable(CAP_SYS_ADMIN)) 3584 return -EACCES; 3585 rv = mddev ? mddev_lock(mddev) : -ENODEV; 3586 if (!rv) { 3587 if (rdev->mddev == NULL) 3588 rv = -ENODEV; 3589 else 3590 rv = entry->store(rdev, page, length); 3591 mddev_unlock(mddev); 3592 } 3593 return rv; 3594 } 3595 3596 static void rdev_free(struct kobject *ko) 3597 { 3598 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3599 kfree(rdev); 3600 } 3601 static const struct sysfs_ops rdev_sysfs_ops = { 3602 .show = rdev_attr_show, 3603 .store = rdev_attr_store, 3604 }; 3605 static struct kobj_type rdev_ktype = { 3606 .release = rdev_free, 3607 .sysfs_ops = &rdev_sysfs_ops, 3608 .default_attrs = rdev_default_attrs, 3609 }; 3610 3611 int md_rdev_init(struct md_rdev *rdev) 3612 { 3613 rdev->desc_nr = -1; 3614 rdev->saved_raid_disk = -1; 3615 rdev->raid_disk = -1; 3616 rdev->flags = 0; 3617 rdev->data_offset = 0; 3618 rdev->new_data_offset = 0; 3619 rdev->sb_events = 0; 3620 rdev->last_read_error = 0; 3621 rdev->sb_loaded = 0; 3622 rdev->bb_page = NULL; 3623 atomic_set(&rdev->nr_pending, 0); 3624 atomic_set(&rdev->read_errors, 0); 3625 atomic_set(&rdev->corrected_errors, 0); 3626 3627 INIT_LIST_HEAD(&rdev->same_set); 3628 init_waitqueue_head(&rdev->blocked_wait); 3629 3630 /* Add space to store bad block list. 3631 * This reserves the space even on arrays where it cannot 3632 * be used - I wonder if that matters 3633 */ 3634 return badblocks_init(&rdev->badblocks, 0); 3635 } 3636 EXPORT_SYMBOL_GPL(md_rdev_init); 3637 /* 3638 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3639 * 3640 * mark the device faulty if: 3641 * 3642 * - the device is nonexistent (zero size) 3643 * - the device has no valid superblock 3644 * 3645 * a faulty rdev _never_ has rdev->sb set. 3646 */ 3647 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3648 { 3649 char b[BDEVNAME_SIZE]; 3650 int err; 3651 struct md_rdev *rdev; 3652 sector_t size; 3653 3654 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3655 if (!rdev) 3656 return ERR_PTR(-ENOMEM); 3657 3658 err = md_rdev_init(rdev); 3659 if (err) 3660 goto abort_free; 3661 err = alloc_disk_sb(rdev); 3662 if (err) 3663 goto abort_free; 3664 3665 err = lock_rdev(rdev, newdev, super_format == -2); 3666 if (err) 3667 goto abort_free; 3668 3669 kobject_init(&rdev->kobj, &rdev_ktype); 3670 3671 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3672 if (!size) { 3673 pr_warn("md: %s has zero or unknown size, marking faulty!\n", 3674 bdevname(rdev->bdev,b)); 3675 err = -EINVAL; 3676 goto abort_free; 3677 } 3678 3679 if (super_format >= 0) { 3680 err = super_types[super_format]. 
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			pr_warn("md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	return rdev;

abort_free:
	if (rdev->bdev)
		unlock_rdev(rdev);
	md_rdev_clear(rdev);
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */

static int analyze_sbs(struct mddev *mddev)
{
	int i;
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
				bdevname(rdev->bdev,b));
			md_kick_rdev_from_array(rdev);
		}

	/* Cannot find a valid fresh disk */
	if (!freshest) {
		pr_warn("md: cannot find a valid disk\n");
		return -EINVAL;
	}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each_safe(rdev, tmp, mddev) {
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
			pr_warn("md: %s: %s: only %d devices permitted\n",
				mdname(mddev), bdevname(rdev->bdev, b),
				mddev->max_disks);
			md_kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest) {
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				pr_warn("md: kicking non-fresh %s from array!\n",
					bdevname(rdev->bdev,b));
				md_kick_rdev_from_array(rdev);
				continue;
			}
		}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >=
			   (mddev->raid_disks - min(0, mddev->delta_disks)) &&
			   !test_bit(Journal, &rdev->flags)) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	return 0;
}

/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale',
 * all without any floating-point arithmetic.
 * e.g. with scale == 3, "1.25" yields 1250 and "2" yields 2000.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.'
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
	unsigned long msec;

	if (mddev_is_clustered(mddev)) {
		pr_warn("md: Safemode is disabled for clustered mode\n");
		return -EINVAL;
	}

	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
		return -EINVAL;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		unsigned long new_delay = (msec*HZ)/1000;

		if (new_delay == 0)
			new_delay = 1;
		mddev->safemode_delay = new_delay;
		if (new_delay < old_delay || old_delay == 0)
			mod_timer(&mddev->safemode_timer, jiffies+1);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);

static ssize_t
level_show(struct mddev *mddev, char *page)
{
	struct md_personality *p;
	int ret;
	spin_lock(&mddev->lock);
	p = mddev->pers;
	if (p)
		ret = sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		ret = sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		ret = sprintf(page, "%d\n", mddev->level);
	else
		ret = 0;
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
	char clevel[16];
	ssize_t rv;
	size_t slen = len;
	struct md_personality *pers, *oldpers;
	long level;
	void *priv, *oldpriv;
	struct md_rdev *rdev;

	if (slen == 0 || slen >= sizeof(clevel))
		return -EINVAL;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;

	if (mddev->pers == NULL) {
		strncpy(mddev->clevel, buf, slen);
		if (mddev->clevel[slen-1] == '\n')
			slen--;
		mddev->clevel[slen] = 0;
		mddev->level = LEVEL_NONE;
		rv = len;
		goto out_unlock;
	}
	rv = -EROFS;
	if (mddev->ro)
		goto out_unlock;

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will be able to handle the array
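	 *    (the new personality must provide ->takeover(), and the
	 *     current one must provide ->quiesce(); both are checked below)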
3899 */ 3900 3901 rv = -EBUSY; 3902 if (mddev->sync_thread || 3903 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3904 mddev->reshape_position != MaxSector || 3905 mddev->sysfs_active) 3906 goto out_unlock; 3907 3908 rv = -EINVAL; 3909 if (!mddev->pers->quiesce) { 3910 pr_warn("md: %s: %s does not support online personality change\n", 3911 mdname(mddev), mddev->pers->name); 3912 goto out_unlock; 3913 } 3914 3915 /* Now find the new personality */ 3916 strncpy(clevel, buf, slen); 3917 if (clevel[slen-1] == '\n') 3918 slen--; 3919 clevel[slen] = 0; 3920 if (kstrtol(clevel, 10, &level)) 3921 level = LEVEL_NONE; 3922 3923 if (request_module("md-%s", clevel) != 0) 3924 request_module("md-level-%s", clevel); 3925 spin_lock(&pers_lock); 3926 pers = find_pers(level, clevel); 3927 if (!pers || !try_module_get(pers->owner)) { 3928 spin_unlock(&pers_lock); 3929 pr_warn("md: personality %s not loaded\n", clevel); 3930 rv = -EINVAL; 3931 goto out_unlock; 3932 } 3933 spin_unlock(&pers_lock); 3934 3935 if (pers == mddev->pers) { 3936 /* Nothing to do! */ 3937 module_put(pers->owner); 3938 rv = len; 3939 goto out_unlock; 3940 } 3941 if (!pers->takeover) { 3942 module_put(pers->owner); 3943 pr_warn("md: %s: %s does not support personality takeover\n", 3944 mdname(mddev), clevel); 3945 rv = -EINVAL; 3946 goto out_unlock; 3947 } 3948 3949 rdev_for_each(rdev, mddev) 3950 rdev->new_raid_disk = rdev->raid_disk; 3951 3952 /* ->takeover must set new_* and/or delta_disks 3953 * if it succeeds, and may set them when it fails. 3954 */ 3955 priv = pers->takeover(mddev); 3956 if (IS_ERR(priv)) { 3957 mddev->new_level = mddev->level; 3958 mddev->new_layout = mddev->layout; 3959 mddev->new_chunk_sectors = mddev->chunk_sectors; 3960 mddev->raid_disks -= mddev->delta_disks; 3961 mddev->delta_disks = 0; 3962 mddev->reshape_backwards = 0; 3963 module_put(pers->owner); 3964 pr_warn("md: %s: %s would not accept array\n", 3965 mdname(mddev), clevel); 3966 rv = PTR_ERR(priv); 3967 goto out_unlock; 3968 } 3969 3970 /* Looks like we have a winner */ 3971 mddev_suspend(mddev); 3972 mddev_detach(mddev); 3973 3974 spin_lock(&mddev->lock); 3975 oldpers = mddev->pers; 3976 oldpriv = mddev->private; 3977 mddev->pers = pers; 3978 mddev->private = priv; 3979 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3980 mddev->level = mddev->new_level; 3981 mddev->layout = mddev->new_layout; 3982 mddev->chunk_sectors = mddev->new_chunk_sectors; 3983 mddev->delta_disks = 0; 3984 mddev->reshape_backwards = 0; 3985 mddev->degraded = 0; 3986 spin_unlock(&mddev->lock); 3987 3988 if (oldpers->sync_request == NULL && 3989 mddev->external) { 3990 /* We are converting from a no-redundancy array 3991 * to a redundancy array and metadata is managed 3992 * externally so we need to be sure that writes 3993 * won't block due to a need to transition 3994 * clean->dirty 3995 * until external management is started. 
		 */
		mddev->in_sync = 0;
		mddev->safemode_delay = 0;
		mddev->safemode = 0;
	}

	oldpers->free(mddev, oldpriv);

	if (oldpers->sync_request == NULL &&
	    pers->sync_request != NULL) {
		/* need to add the md_redundancy_group */
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			pr_warn("md: cannot register extra attributes for %s\n",
				mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
	}
	if (oldpers->sync_request != NULL &&
	    pers->sync_request == NULL) {
		/* need to remove the md_redundancy_group */
		if (mddev->to_remove == NULL)
			mddev->to_remove = &md_redundancy_group;
	}

	module_put(oldpers->owner);

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk >= mddev->raid_disks)
			rdev->new_raid_disk = -1;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		sysfs_unlink_rdev(mddev, rdev);
	}
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		rdev->raid_disk = rdev->new_raid_disk;
		if (rdev->raid_disk < 0)
			clear_bit(In_sync, &rdev->flags);
		else {
			if (sysfs_link_rdev(mddev, rdev))
				pr_warn("md: cannot register rd%d for %s after level change\n",
					rdev->raid_disk, mdname(mddev));
		}
	}

	if (pers->sync_request == NULL) {
		/* this is now an array without redundancy, so
		 * it must always be in_sync
		 */
		mddev->in_sync = 1;
		del_timer_sync(&mddev->safemode_timer);
	}
	blk_set_stacking_limits(&mddev->queue->limits);
	pers->run(mddev);
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	mddev_resume(mddev);
	if (!mddev->thread)
		md_update_sb(mddev, 1);
	sysfs_notify(&mddev->kobj, NULL, "level");
	md_new_event(mddev);
	rv = len;
out_unlock:
	mddev_unlock(mddev);
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);

static ssize_t
layout_show(struct mddev *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int n;
	int err;

	err = kstrtouint(buf, 10, &n);
	if (err < 0)
		return err;
	err = mddev_lock(mddev);
	if (err)
		return err;

	if (mddev->pers) {
		if (mddev->pers->check_reshape == NULL)
			err = -EBUSY;
		else if (mddev->ro)
			err = -EROFS;
		else {
			mddev->new_layout = n;
			err = mddev->pers->check_reshape(mddev);
			if (err)
				mddev->new_layout = mddev->layout;
		}
	} else {
		mddev->new_layout = n;
		if (mddev->reshape_position == MaxSector)
			mddev->layout = n;
	}
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);

static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(struct mddev *mddev, int raid_disks);

static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int n;
	int err;

	err = kstrtouint(buf, 10, &n);
	if (err < 0)
		return err;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers)
		err = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		struct md_rdev *rdev;
		int olddisks = mddev->raid_disks - mddev->delta_disks;

		err = -EINVAL;
		rdev_for_each(rdev, mddev) {
			if (olddisks < n &&
			    rdev->data_offset < rdev->new_data_offset)
				goto out_unlock;
			if (olddisks > n &&
			    rdev->data_offset > rdev->new_data_offset)
				goto out_unlock;
		}
		err = 0;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
		mddev->reshape_backwards = (mddev->delta_disks < 0);
	} else
		mddev->raid_disks = n;
out_unlock:
	mddev_unlock(mddev);
	return err ? err : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);

static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_sectors != mddev->new_chunk_sectors)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_chunk_sectors << 9,
			       mddev->chunk_sectors << 9);
	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long n;
	int err;

	err = kstrtoul(buf, 10, &n);
	if (err < 0)
		return err;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers) {
		if (mddev->pers->check_reshape == NULL)
			err = -EBUSY;
		else if (mddev->ro)
			err = -EROFS;
		else {
			mddev->new_chunk_sectors = n >> 9;
			err = mddev->pers->check_reshape(mddev);
			if (err)
				mddev->new_chunk_sectors = mddev->chunk_sectors;
		}
	} else {
		mddev->new_chunk_sectors = n >> 9;
		if (mddev->reshape_position == MaxSector)
			mddev->chunk_sectors = n >> 9;
	}
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
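
/*
 * Note that chunk_size is exposed in bytes while mddev stores it in
 * 512-byte sectors, hence the "<< 9" on show and ">> 9" on store above.
 * Like layout and raid_disks, it displays "new (old)" while a reshape
 * is pending so both values stay visible.
 */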
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
	if (mddev->recovery_cp == MaxSector)
		return sprintf(page, "none\n");
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long n;
	int err;

	if (cmd_match(buf, "none"))
		n = MaxSector;
	else {
		err = kstrtoull(buf, 10, &n);
		if (err < 0)
			return err;
		if (n != (sector_t)n)
			return -EINVAL;
	}

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		err = -EBUSY;

	if (!err) {
		mddev->recovery_cp = n;
		if (mddev->pers)
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
	}
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
		resync_start_show, resync_start_store);

/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean
 *     no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 * broken
 *     RAID0/LINEAR-only: same as clean, but array is missing a member.
 *     It's useful because mounted RAID0/LINEAR arrays aren't stopped
 *     when a member is gone, so this state will at least alert the
 *     user that something is wrong.
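 *
 * All of these are reported by array_state_show(); writes are handled
 * by array_state_store() below, where write-pending, active-idle and
 * broken cannot be set directly.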
4298 */ 4299 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4300 write_pending, active_idle, broken, bad_word}; 4301 static char *array_states[] = { 4302 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4303 "write-pending", "active-idle", "broken", NULL }; 4304 4305 static int match_word(const char *word, char **list) 4306 { 4307 int n; 4308 for (n=0; list[n]; n++) 4309 if (cmd_match(word, list[n])) 4310 break; 4311 return n; 4312 } 4313 4314 static ssize_t 4315 array_state_show(struct mddev *mddev, char *page) 4316 { 4317 enum array_state st = inactive; 4318 4319 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { 4320 switch(mddev->ro) { 4321 case 1: 4322 st = readonly; 4323 break; 4324 case 2: 4325 st = read_auto; 4326 break; 4327 case 0: 4328 spin_lock(&mddev->lock); 4329 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4330 st = write_pending; 4331 else if (mddev->in_sync) 4332 st = clean; 4333 else if (mddev->safemode) 4334 st = active_idle; 4335 else 4336 st = active; 4337 spin_unlock(&mddev->lock); 4338 } 4339 4340 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) 4341 st = broken; 4342 } else { 4343 if (list_empty(&mddev->disks) && 4344 mddev->raid_disks == 0 && 4345 mddev->dev_sectors == 0) 4346 st = clear; 4347 else 4348 st = inactive; 4349 } 4350 return sprintf(page, "%s\n", array_states[st]); 4351 } 4352 4353 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 4354 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 4355 static int do_md_run(struct mddev *mddev); 4356 static int restart_array(struct mddev *mddev); 4357 4358 static ssize_t 4359 array_state_store(struct mddev *mddev, const char *buf, size_t len) 4360 { 4361 int err = 0; 4362 enum array_state st = match_word(buf, array_states); 4363 4364 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 4365 /* don't take reconfig_mutex when toggling between 4366 * clean and active 4367 */ 4368 spin_lock(&mddev->lock); 4369 if (st == active) { 4370 restart_array(mddev); 4371 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4372 md_wakeup_thread(mddev->thread); 4373 wake_up(&mddev->sb_wait); 4374 } else /* st == clean */ { 4375 restart_array(mddev); 4376 if (!set_in_sync(mddev)) 4377 err = -EBUSY; 4378 } 4379 if (!err) 4380 sysfs_notify_dirent_safe(mddev->sysfs_state); 4381 spin_unlock(&mddev->lock); 4382 return err ?: len; 4383 } 4384 err = mddev_lock(mddev); 4385 if (err) 4386 return err; 4387 err = -EINVAL; 4388 switch(st) { 4389 case bad_word: 4390 break; 4391 case clear: 4392 /* stopping an active array */ 4393 err = do_md_stop(mddev, 0, NULL); 4394 break; 4395 case inactive: 4396 /* stopping an active array */ 4397 if (mddev->pers) 4398 err = do_md_stop(mddev, 2, NULL); 4399 else 4400 err = 0; /* already inactive */ 4401 break; 4402 case suspended: 4403 break; /* not supported yet */ 4404 case readonly: 4405 if (mddev->pers) 4406 err = md_set_readonly(mddev, NULL); 4407 else { 4408 mddev->ro = 1; 4409 set_disk_ro(mddev->gendisk, 1); 4410 err = do_md_run(mddev); 4411 } 4412 break; 4413 case read_auto: 4414 if (mddev->pers) { 4415 if (mddev->ro == 0) 4416 err = md_set_readonly(mddev, NULL); 4417 else if (mddev->ro == 1) 4418 err = restart_array(mddev); 4419 if (err == 0) { 4420 mddev->ro = 2; 4421 set_disk_ro(mddev->gendisk, 0); 4422 } 4423 } else { 4424 mddev->ro = 2; 4425 err = do_md_run(mddev); 4426 } 4427 break; 4428 case clean: 4429 if (mddev->pers) { 4430 err = 
				restart_array(mddev);
			if (err)
				break;
			spin_lock(&mddev->lock);
			if (!set_in_sync(mddev))
				err = -EBUSY;
			spin_unlock(&mddev->lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			err = restart_array(mddev);
			if (err)
				break;
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
	case broken:
		/* these cannot be set */
		break;
	}

	if (!err) {
		if (mddev->hold_active == UNTIL_IOCTL)
			mddev->hold_active = 0;
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_array_state =
__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);

static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n",
		       atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int n;
	int rv;

	rv = kstrtouint(buf, 10, &n);
	if (rv < 0)
		return rv;
	atomic_set(&mddev->max_corr_read_errors, n);
	return len;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
	max_corrected_read_errors_store);

static ssize_t
null_show(struct mddev *mddev, char *page)
{
	return -EINVAL;
}

/* need to ensure rdev_delayed_delete() has completed */
static void flush_rdev_wq(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (work_pending(&rdev->del_work)) {
			flush_workqueue(md_rdev_misc_wq);
			break;
		}
	rcu_read_unlock();
}

static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, the only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
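	 * For example, writing "8:16" imports the device with major 8,
	 * minor 16 (typically /dev/sdb -- the numbers here are only an
	 * illustration).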
4524 */ 4525 char *e; 4526 int major = simple_strtoul(buf, &e, 10); 4527 int minor; 4528 dev_t dev; 4529 struct md_rdev *rdev; 4530 int err; 4531 4532 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 4533 return -EINVAL; 4534 minor = simple_strtoul(e+1, &e, 10); 4535 if (*e && *e != '\n') 4536 return -EINVAL; 4537 dev = MKDEV(major, minor); 4538 if (major != MAJOR(dev) || 4539 minor != MINOR(dev)) 4540 return -EOVERFLOW; 4541 4542 flush_rdev_wq(mddev); 4543 err = mddev_lock(mddev); 4544 if (err) 4545 return err; 4546 if (mddev->persistent) { 4547 rdev = md_import_device(dev, mddev->major_version, 4548 mddev->minor_version); 4549 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 4550 struct md_rdev *rdev0 4551 = list_entry(mddev->disks.next, 4552 struct md_rdev, same_set); 4553 err = super_types[mddev->major_version] 4554 .load_super(rdev, rdev0, mddev->minor_version); 4555 if (err < 0) 4556 goto out; 4557 } 4558 } else if (mddev->external) 4559 rdev = md_import_device(dev, -2, -1); 4560 else 4561 rdev = md_import_device(dev, -1, -1); 4562 4563 if (IS_ERR(rdev)) { 4564 mddev_unlock(mddev); 4565 return PTR_ERR(rdev); 4566 } 4567 err = bind_rdev_to_array(rdev, mddev); 4568 out: 4569 if (err) 4570 export_rdev(rdev); 4571 mddev_unlock(mddev); 4572 if (!err) 4573 md_new_event(mddev); 4574 return err ? err : len; 4575 } 4576 4577 static struct md_sysfs_entry md_new_device = 4578 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4579 4580 static ssize_t 4581 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4582 { 4583 char *end; 4584 unsigned long chunk, end_chunk; 4585 int err; 4586 4587 err = mddev_lock(mddev); 4588 if (err) 4589 return err; 4590 if (!mddev->bitmap) 4591 goto out; 4592 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4593 while (*buf) { 4594 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4595 if (buf == end) break; 4596 if (*end == '-') { /* range */ 4597 buf = end + 1; 4598 end_chunk = simple_strtoul(buf, &end, 0); 4599 if (buf == end) break; 4600 } 4601 if (*end && !isspace(*end)) break; 4602 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4603 buf = skip_spaces(end); 4604 } 4605 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4606 out: 4607 mddev_unlock(mddev); 4608 return len; 4609 } 4610 4611 static struct md_sysfs_entry md_bitmap = 4612 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4613 4614 static ssize_t 4615 size_show(struct mddev *mddev, char *page) 4616 { 4617 return sprintf(page, "%llu\n", 4618 (unsigned long long)mddev->dev_sectors / 2); 4619 } 4620 4621 static int update_size(struct mddev *mddev, sector_t num_sectors); 4622 4623 static ssize_t 4624 size_store(struct mddev *mddev, const char *buf, size_t len) 4625 { 4626 /* If array is inactive, we can reduce the component size, but 4627 * not increase it (except from 0). 4628 * If array is active, we can try an on-line resize 4629 */ 4630 sector_t sectors; 4631 int err = strict_blocks_to_sectors(buf, §ors); 4632 4633 if (err < 0) 4634 return err; 4635 err = mddev_lock(mddev); 4636 if (err) 4637 return err; 4638 if (mddev->pers) { 4639 err = update_size(mddev, sectors); 4640 if (err == 0) 4641 md_update_sb(mddev, 1); 4642 } else { 4643 if (mddev->dev_sectors == 0 || 4644 mddev->dev_sectors > sectors) 4645 mddev->dev_sectors = sectors; 4646 else 4647 err = -ENOSPC; 4648 } 4649 mddev_unlock(mddev); 4650 return err ? 
		err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);

/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	int err;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EBUSY;
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		goto out_unlock;

	err = 0;
	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	major = simple_strtoul(buf, &e, 10);
	err = -EINVAL;
	if (e==buf || *e != '.')
		goto out_unlock;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		goto out_unlock;
	err = -ENOENT;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		goto out_unlock;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	err = 0;
out_unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	unsigned long recovery = mddev->recovery;
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
			type = "recover";
		else if (mddev->reshape_position != MaxSector)
			type = "reshape";
	}
	return sprintf(page, "%s\n", type);
}
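
/*
 * action_store() below accepts "idle", "frozen", "resync", "recover",
 * "check", "repair" and "reshape".  For example (the device name is
 * only an illustration):
 *     echo check > /sys/block/md0/md/sync_action
 * requests a consistency scan, whose result is reported via
 * mismatch_cnt.
 */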
static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (cmd_match(page, "frozen"))
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		else
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    mddev_lock(mddev) == 0) {
			if (work_pending(&mddev->del_work))
				flush_workqueue(md_misc_wq);
			if (mddev->sync_thread) {
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_reap_sync_thread(mddev);
			}
			mddev_unlock(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev_lock(mddev);
		if (!err) {
			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
				err = -EBUSY;
			else {
				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
				err = mddev->pers->start_reshape(mddev);
			}
			mddev_unlock(mddev);
		}
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}

static struct md_sysfs_entry md_scan_mode =
__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n", mddev->last_sync_action);
}

static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)
		       atomic64_read(&mddev->resync_mismatches));
}

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
"local": "system"); 4860 } 4861 4862 static ssize_t 4863 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4864 { 4865 unsigned int min; 4866 int rv; 4867 4868 if (strncmp(buf, "system", 6)==0) { 4869 min = 0; 4870 } else { 4871 rv = kstrtouint(buf, 10, &min); 4872 if (rv < 0) 4873 return rv; 4874 if (min == 0) 4875 return -EINVAL; 4876 } 4877 mddev->sync_speed_min = min; 4878 return len; 4879 } 4880 4881 static struct md_sysfs_entry md_sync_min = 4882 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4883 4884 static ssize_t 4885 sync_max_show(struct mddev *mddev, char *page) 4886 { 4887 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4888 mddev->sync_speed_max ? "local": "system"); 4889 } 4890 4891 static ssize_t 4892 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4893 { 4894 unsigned int max; 4895 int rv; 4896 4897 if (strncmp(buf, "system", 6)==0) { 4898 max = 0; 4899 } else { 4900 rv = kstrtouint(buf, 10, &max); 4901 if (rv < 0) 4902 return rv; 4903 if (max == 0) 4904 return -EINVAL; 4905 } 4906 mddev->sync_speed_max = max; 4907 return len; 4908 } 4909 4910 static struct md_sysfs_entry md_sync_max = 4911 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4912 4913 static ssize_t 4914 degraded_show(struct mddev *mddev, char *page) 4915 { 4916 return sprintf(page, "%d\n", mddev->degraded); 4917 } 4918 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4919 4920 static ssize_t 4921 sync_force_parallel_show(struct mddev *mddev, char *page) 4922 { 4923 return sprintf(page, "%d\n", mddev->parallel_resync); 4924 } 4925 4926 static ssize_t 4927 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4928 { 4929 long n; 4930 4931 if (kstrtol(buf, 10, &n)) 4932 return -EINVAL; 4933 4934 if (n != 0 && n != 1) 4935 return -EINVAL; 4936 4937 mddev->parallel_resync = n; 4938 4939 if (mddev->sync_thread) 4940 wake_up(&resync_wait); 4941 4942 return len; 4943 } 4944 4945 /* force parallel resync, even with shared block devices */ 4946 static struct md_sysfs_entry md_sync_force_parallel = 4947 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4948 sync_force_parallel_show, sync_force_parallel_store); 4949 4950 static ssize_t 4951 sync_speed_show(struct mddev *mddev, char *page) 4952 { 4953 unsigned long resync, dt, db; 4954 if (mddev->curr_resync == 0) 4955 return sprintf(page, "none\n"); 4956 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4957 dt = (jiffies - mddev->resync_mark) / HZ; 4958 if (!dt) dt++; 4959 db = resync - mddev->resync_mark_cnt; 4960 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4961 } 4962 4963 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4964 4965 static ssize_t 4966 sync_completed_show(struct mddev *mddev, char *page) 4967 { 4968 unsigned long long max_sectors, resync; 4969 4970 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4971 return sprintf(page, "none\n"); 4972 4973 if (mddev->curr_resync == 1 || 4974 mddev->curr_resync == 2) 4975 return sprintf(page, "delayed\n"); 4976 4977 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4978 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4979 max_sectors = mddev->resync_max_sectors; 4980 else 4981 max_sectors = mddev->dev_sectors; 4982 4983 resync = mddev->curr_resync_completed; 4984 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4985 } 4986 4987 static struct md_sysfs_entry md_sync_completed = 4988 __ATTR_PREALLOC(sync_completed, S_IRUGO, 
static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (mddev->curr_resync == 1 ||
	    mddev->curr_resync == 2)
		return sprintf(page, "delayed\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed =
	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);

static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	int err;

	if (kstrtoull(buf, 10, &min))
		return -EINVAL;

	spin_lock(&mddev->lock);
	err = -EINVAL;
	if (min > mddev->resync_max)
		goto out_unlock;

	err = -EBUSY;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		goto out_unlock;

	/* Round down to multiple of 4K for safety */
	mddev->resync_min = round_down(min, 8);
	err = 0;

out_unlock:
	spin_unlock(&mddev->lock);
	return err ?: len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err;
	spin_lock(&mddev->lock);
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		int chunk;

		err = -EINVAL;
		if (kstrtoull(buf, 10, &max))
			goto out_unlock;
		if (max < mddev->resync_min)
			goto out_unlock;

		err = -EBUSY;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			goto out_unlock;

		/* Must be a multiple of chunk_size */
		chunk = mddev->chunk_sectors;
		if (chunk) {
			sector_t temp = max;

			err = -EINVAL;
			if (sector_div(temp, chunk))
				goto out_unlock;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	err = 0;
out_unlock:
	spin_unlock(&mddev->lock);
	return err ?: len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
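
/*
 * Note that min_sync_store() rounds down to a multiple of 8 sectors
 * (4K) and refuses any change while a resync is running, while
 * max_sync_store() requires a multiple of the chunk size and refuses
 * to lower the bound under a running, writable resync.
 */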
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EINVAL;
	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		goto unlock;
	mddev_suspend(mddev);
	mddev->suspend_lo = new;
	mddev_resume(mddev);

	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EINVAL;
	if (mddev->pers == NULL)
		goto unlock;

	mddev_suspend(mddev);
	mddev->suspend_hi = new;
	mddev_resume(mddev);

	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
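
/*
 * Both suspend_lo and suspend_hi updates are bracketed by
 * mddev_suspend()/mddev_resume() above, so in-flight IO is drained
 * before the new boundary takes effect.
 */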
"backwards" : "forwards"); 5204 } 5205 5206 static ssize_t 5207 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 5208 { 5209 int backwards = 0; 5210 int err; 5211 5212 if (cmd_match(buf, "forwards")) 5213 backwards = 0; 5214 else if (cmd_match(buf, "backwards")) 5215 backwards = 1; 5216 else 5217 return -EINVAL; 5218 if (mddev->reshape_backwards == backwards) 5219 return len; 5220 5221 err = mddev_lock(mddev); 5222 if (err) 5223 return err; 5224 /* check if we are allowed to change */ 5225 if (mddev->delta_disks) 5226 err = -EBUSY; 5227 else if (mddev->persistent && 5228 mddev->major_version == 0) 5229 err = -EINVAL; 5230 else 5231 mddev->reshape_backwards = backwards; 5232 mddev_unlock(mddev); 5233 return err ?: len; 5234 } 5235 5236 static struct md_sysfs_entry md_reshape_direction = 5237 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 5238 reshape_direction_store); 5239 5240 static ssize_t 5241 array_size_show(struct mddev *mddev, char *page) 5242 { 5243 if (mddev->external_size) 5244 return sprintf(page, "%llu\n", 5245 (unsigned long long)mddev->array_sectors/2); 5246 else 5247 return sprintf(page, "default\n"); 5248 } 5249 5250 static ssize_t 5251 array_size_store(struct mddev *mddev, const char *buf, size_t len) 5252 { 5253 sector_t sectors; 5254 int err; 5255 5256 err = mddev_lock(mddev); 5257 if (err) 5258 return err; 5259 5260 /* cluster raid doesn't support change array_sectors */ 5261 if (mddev_is_clustered(mddev)) { 5262 mddev_unlock(mddev); 5263 return -EINVAL; 5264 } 5265 5266 if (strncmp(buf, "default", 7) == 0) { 5267 if (mddev->pers) 5268 sectors = mddev->pers->size(mddev, 0, 0); 5269 else 5270 sectors = mddev->array_sectors; 5271 5272 mddev->external_size = 0; 5273 } else { 5274 if (strict_blocks_to_sectors(buf, §ors) < 0) 5275 err = -EINVAL; 5276 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 5277 err = -E2BIG; 5278 else 5279 mddev->external_size = 1; 5280 } 5281 5282 if (!err) { 5283 mddev->array_sectors = sectors; 5284 if (mddev->pers) { 5285 set_capacity(mddev->gendisk, mddev->array_sectors); 5286 revalidate_disk(mddev->gendisk); 5287 } 5288 } 5289 mddev_unlock(mddev); 5290 return err ?: len; 5291 } 5292 5293 static struct md_sysfs_entry md_array_size = 5294 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 5295 array_size_store); 5296 5297 static ssize_t 5298 consistency_policy_show(struct mddev *mddev, char *page) 5299 { 5300 int ret; 5301 5302 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 5303 ret = sprintf(page, "journal\n"); 5304 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { 5305 ret = sprintf(page, "ppl\n"); 5306 } else if (mddev->bitmap) { 5307 ret = sprintf(page, "bitmap\n"); 5308 } else if (mddev->pers) { 5309 if (mddev->pers->sync_request) 5310 ret = sprintf(page, "resync\n"); 5311 else 5312 ret = sprintf(page, "none\n"); 5313 } else { 5314 ret = sprintf(page, "unknown\n"); 5315 } 5316 5317 return ret; 5318 } 5319 5320 static ssize_t 5321 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) 5322 { 5323 int err = 0; 5324 5325 if (mddev->pers) { 5326 if (mddev->pers->change_consistency_policy) 5327 err = mddev->pers->change_consistency_policy(mddev, buf); 5328 else 5329 err = -EBUSY; 5330 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { 5331 set_bit(MD_HAS_PPL, &mddev->flags); 5332 } else { 5333 err = -EINVAL; 5334 } 5335 5336 return err ? 
static ssize_t
consistency_policy_show(struct mddev *mddev, char *page)
{
	int ret;

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		ret = sprintf(page, "journal\n");
	} else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
		ret = sprintf(page, "ppl\n");
	} else if (mddev->bitmap) {
		ret = sprintf(page, "bitmap\n");
	} else if (mddev->pers) {
		if (mddev->pers->sync_request)
			ret = sprintf(page, "resync\n");
		else
			ret = sprintf(page, "none\n");
	} else {
		ret = sprintf(page, "unknown\n");
	}

	return ret;
}

static ssize_t
consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = 0;

	if (mddev->pers) {
		if (mddev->pers->change_consistency_policy)
			err = mddev->pers->change_consistency_policy(mddev, buf);
		else
			err = -EBUSY;
	} else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
		set_bit(MD_HAS_PPL, &mddev->flags);
	} else {
		err = -EINVAL;
	}

	return err ? err : len;
}

static struct md_sysfs_entry md_consistency_policy =
__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
       consistency_policy_store);

static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->fail_last_dev);
}

/*
 * Setting fail_last_dev to true allows the last device to be forcibly
 * removed from RAID1/RAID10.
 */
static ssize_t
fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	int ret;
	bool value;

	ret = kstrtobool(buf, &value);
	if (ret)
		return ret;

	if (value != mddev->fail_last_dev)
		mddev->fail_last_dev = value;

	return len;
}
static struct md_sysfs_entry md_fail_last_dev =
__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
       fail_last_dev_store);

static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
{
	if (mddev->pers == NULL || (mddev->pers->level != 1))
		return sprintf(page, "n/a\n");
	else
		return sprintf(page, "%d\n", mddev->serialize_policy);
}

/*
 * Setting serialize_policy to true enforces that write IO is not
 * reordered for raid1.
 */
static ssize_t
serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return err;

	if (value == mddev->serialize_policy)
		return len;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers == NULL || (mddev->pers->level != 1)) {
		pr_err("md: serialize_policy is only effective for raid1\n");
		err = -EINVAL;
		goto unlock;
	}

	mddev_suspend(mddev);
	if (value)
		mddev_create_serial_pool(mddev, NULL, true);
	else
		mddev_destroy_serial_pool(mddev, NULL, true);
	mddev->serialize_policy = value;
	mddev_resume(mddev);
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_serialize_policy =
__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
       serialize_policy_store);

static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_reshape_direction.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	&md_consistency_policy.attr,
	&md_fail_last_dev.attr,
	&md_serialize_policy.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_last_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
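
/*
 * The attributes are split in two groups: md_default_attrs is always
 * present, while md_redundancy_group is only registered for
 * personalities that provide ->sync_request (see level_store() above
 * and md_run() below), since sync actions make no sense without
 * redundancy.
 */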
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = entry->show(mddev, page);
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	rv = entry->store(mddev, page, length);
	mddev_put(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk)
		del_gendisk(mddev->gendisk);
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);
	if (mddev->gendisk)
		put_disk(mddev->gendisk);
	percpu_ref_exit(&mddev->writes_pending);

	bioset_exit(&mddev->bio_set);
	bioset_exit(&mddev->sync_set);
	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};

int mdp_major = 0;

static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

static void no_op(struct percpu_ref *r) {}

int mddev_init_writes_pending(struct mddev *mddev)
{
	if (mddev->writes_pending.percpu_count_ptr)
		return 0;
	if (percpu_ref_init(&mddev->writes_pending, no_op,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
		return -ENOMEM;
	/* We want to start with the refcount at zero */
	percpu_ref_put(&mddev->writes_pending);
	return 0;
}
EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
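
/*
 * md_alloc() below creates the gendisk and request queue for an array.
 * It is reached either from md_probe() (opening a node in /dev with
 * create_on_open set) or from add_named_array() further down.
 */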
static int md_alloc(dev_t dev, char *name)
{
	/*
	 * If dev is zero, name is the name of a device to allocate with
	 * an arbitrary minor number.  It will be "md_???"
	 * If dev is non-zero it must be a device number with a MAJOR of
	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
	 * the device is being created by opening a node in /dev.
	 * If "name" is not NULL, the device is being created by
	 * writing to /sys/module/md_mod/parameters/new_array.
	 */
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name && !dev) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}
	if (name && dev)
		/*
		 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
		 */
		mddev->hold_active = UNTIL_STOP;

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!mddev->queue)
		goto abort;

	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_write_cache(mddev->queue, true, true);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->events |= DISK_EVENT_MEDIA_CHANGE;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		pr_debug("md: cannot register %s/md - name in use\n",
			 disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		pr_debug("pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	if (create_on_open)
		md_alloc(dev, NULL);
	return NULL;
}

static int add_named_array(const char *val, const struct kernel_param *kp)
{
	/*
	 * val must be "md_*" or "mdNNN".
	 * For "md_*" we allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 * For "mdNNN" we allocate an array with the minor number NNN
	 * which must not already be in use.
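	 * For example (the names here are only an illustration):
	 *     echo md_home > /sys/module/md_mod/parameters/new_array
	 * creates an array named "md_home", while writing "md127"
	 * claims minor 127 specifically.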
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];
	unsigned long devnum;

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) == 0)
		return md_alloc(0, buf);
	if (strncmp(buf, "md", 2) == 0 &&
	    isdigit(buf[2]) &&
	    kstrtoul(buf+2, 10, &devnum) == 0 &&
	    devnum <= MINORMASK)
		return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);

	return -EINVAL;
}

static void md_safemode_timeout(struct timer_list *t)
{
	struct mddev *mddev = from_timer(mddev, t, safemode_timer);

	mddev->safemode = 1;
	if (mddev->external)
		sysfs_notify_dirent_safe(mddev->sysfs_state);

	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;

int md_run(struct mddev *mddev)
{
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		err = analyze_sbs(mddev);
		if (err)
			return -EINVAL;
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	mddev->has_superblocks = false;
	rdev_for_each(rdev, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);
		if (mddev->ro != 1 &&
		    (bdev_read_only(rdev->bdev) ||
		     bdev_read_only(rdev->meta_bdev))) {
			mddev->ro = 1;
			if (mddev->gendisk)
				set_disk_ro(mddev->gendisk, 1);
		}

		if (rdev->sb_page)
			mddev->has_superblocks = true;

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->meta_bdev) {
			/* Nothing to check */;
		} else if (rdev->data_offset < rdev->sb_start) {
			if (mddev->dev_sectors &&
			    rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start) {
				pr_warn("md: %s: data overlaps metadata\n",
					mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				pr_warn("md: %s: metadata overlaps data\n",
					mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}

	if (!bioset_initialized(&mddev->bio_set)) {
		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
		if (err)
			return err;
	}
	if (!bioset_initialized(&mddev->sync_set)) {
		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
		if (err)
			return err;
	}

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			pr_warn("md: personality for level %d is not loaded!\n",
				mddev->level);
		else
			pr_warn("md: personality for level %s is not loaded!\n",
				mddev->clevel);
		err = -EINVAL;
		goto abort;
	}
	spin_unlock(&pers_lock);
	if (mddev->level != pers->level) {
		mddev->level = pers->level;
		mddev->new_level = pers->level;
	}
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		module_put(pers->owner);
		err = -EINVAL;
		goto abort;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
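		 * (e.g. two members that are partitions of the same
		 * physical disk, detected below by comparing
		 * bd_contains)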
5854 */ 5855 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5856 struct md_rdev *rdev2; 5857 int warned = 0; 5858 5859 rdev_for_each(rdev, mddev) 5860 rdev_for_each(rdev2, mddev) { 5861 if (rdev < rdev2 && 5862 rdev->bdev->bd_contains == 5863 rdev2->bdev->bd_contains) { 5864 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n", 5865 mdname(mddev), 5866 bdevname(rdev->bdev,b), 5867 bdevname(rdev2->bdev,b2)); 5868 warned = 1; 5869 } 5870 } 5871 5872 if (warned) 5873 pr_warn("True protection against single-disk failure might be compromised.\n"); 5874 } 5875 5876 mddev->recovery = 0; 5877 /* may be over-ridden by personality */ 5878 mddev->resync_max_sectors = mddev->dev_sectors; 5879 5880 mddev->ok_start_degraded = start_dirty_degraded; 5881 5882 if (start_readonly && mddev->ro == 0) 5883 mddev->ro = 2; /* read-only, but switch on first write */ 5884 5885 err = pers->run(mddev); 5886 if (err) 5887 pr_warn("md: pers->run() failed ...\n"); 5888 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5889 WARN_ONCE(!mddev->external_size, 5890 "%s: default size too small, but 'external_size' not in effect?\n", 5891 __func__); 5892 pr_warn("md: invalid array_size %llu > default size %llu\n", 5893 (unsigned long long)mddev->array_sectors / 2, 5894 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5895 err = -EINVAL; 5896 } 5897 if (err == 0 && pers->sync_request && 5898 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5899 struct bitmap *bitmap; 5900 5901 bitmap = md_bitmap_create(mddev, -1); 5902 if (IS_ERR(bitmap)) { 5903 err = PTR_ERR(bitmap); 5904 pr_warn("%s: failed to create bitmap (%d)\n", 5905 mdname(mddev), err); 5906 } else 5907 mddev->bitmap = bitmap; 5908 5909 } 5910 if (err) 5911 goto bitmap_abort; 5912 5913 if (mddev->bitmap_info.max_write_behind > 0) { 5914 bool create_pool = false; 5915 5916 rdev_for_each(rdev, mddev) { 5917 if (test_bit(WriteMostly, &rdev->flags) && 5918 rdev_init_serial(rdev)) 5919 create_pool = true; 5920 } 5921 if (create_pool && mddev->serial_info_pool == NULL) { 5922 mddev->serial_info_pool = 5923 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 5924 sizeof(struct serial_info)); 5925 if (!mddev->serial_info_pool) { 5926 err = -ENOMEM; 5927 goto bitmap_abort; 5928 } 5929 } 5930 } 5931 5932 if (mddev->queue) { 5933 bool nonrot = true; 5934 5935 rdev_for_each(rdev, mddev) { 5936 if (rdev->raid_disk >= 0 && 5937 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { 5938 nonrot = false; 5939 break; 5940 } 5941 } 5942 if (mddev->degraded) 5943 nonrot = false; 5944 if (nonrot) 5945 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); 5946 else 5947 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); 5948 } 5949 if (pers->sync_request) { 5950 if (mddev->kobj.sd && 5951 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 5952 pr_warn("md: cannot register extra attributes for %s\n", 5953 mdname(mddev)); 5954 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 5955 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 5956 mddev->ro = 0; 5957 5958 atomic_set(&mddev->max_corr_read_errors, 5959 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 5960 mddev->safemode = 0; 5961 if (mddev_is_clustered(mddev)) 5962 mddev->safemode_delay = 0; 5963 else 5964 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 5965 mddev->in_sync = 1; 5966 smp_wmb(); 5967 spin_lock(&mddev->lock); 5968 mddev->pers = pers; 5969 spin_unlock(&mddev->lock); 5970 rdev_for_each(rdev, mddev) 5971 if (rdev->raid_disk >= 0) 5972 
sysfs_link_rdev(mddev, rdev); /* failure here is OK */ 5973 5974 if (mddev->degraded && !mddev->ro) 5975 /* This ensures that recovering status is reported immediately 5976 * via sysfs - until a lack of spares is confirmed. 5977 */ 5978 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5979 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5980 5981 if (mddev->sb_flags) 5982 md_update_sb(mddev, 0); 5983 5984 md_new_event(mddev); 5985 return 0; 5986 5987 bitmap_abort: 5988 mddev_detach(mddev); 5989 if (mddev->private) 5990 pers->free(mddev, mddev->private); 5991 mddev->private = NULL; 5992 module_put(pers->owner); 5993 md_bitmap_destroy(mddev); 5994 abort: 5995 bioset_exit(&mddev->bio_set); 5996 bioset_exit(&mddev->sync_set); 5997 return err; 5998 } 5999 EXPORT_SYMBOL_GPL(md_run); 6000 6001 static int do_md_run(struct mddev *mddev) 6002 { 6003 int err; 6004 6005 set_bit(MD_NOT_READY, &mddev->flags); 6006 err = md_run(mddev); 6007 if (err) 6008 goto out; 6009 err = md_bitmap_load(mddev); 6010 if (err) { 6011 md_bitmap_destroy(mddev); 6012 goto out; 6013 } 6014 6015 if (mddev_is_clustered(mddev)) 6016 md_allow_write(mddev); 6017 6018 /* run start up tasks that require md_thread */ 6019 md_start(mddev); 6020 6021 md_wakeup_thread(mddev->thread); 6022 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 6023 6024 set_capacity(mddev->gendisk, mddev->array_sectors); 6025 revalidate_disk(mddev->gendisk); 6026 clear_bit(MD_NOT_READY, &mddev->flags); 6027 mddev->changed = 1; 6028 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 6029 sysfs_notify_dirent_safe(mddev->sysfs_state); 6030 sysfs_notify_dirent_safe(mddev->sysfs_action); 6031 sysfs_notify(&mddev->kobj, NULL, "degraded"); 6032 out: 6033 clear_bit(MD_NOT_READY, &mddev->flags); 6034 return err; 6035 } 6036 6037 int md_start(struct mddev *mddev) 6038 { 6039 int ret = 0; 6040 6041 if (mddev->pers->start) { 6042 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6043 md_wakeup_thread(mddev->thread); 6044 ret = mddev->pers->start(mddev); 6045 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6046 md_wakeup_thread(mddev->sync_thread); 6047 } 6048 return ret; 6049 } 6050 EXPORT_SYMBOL_GPL(md_start); 6051 6052 static int restart_array(struct mddev *mddev) 6053 { 6054 struct gendisk *disk = mddev->gendisk; 6055 struct md_rdev *rdev; 6056 bool has_journal = false; 6057 bool has_readonly = false; 6058 6059 /* Complain if it has no devices */ 6060 if (list_empty(&mddev->disks)) 6061 return -ENXIO; 6062 if (!mddev->pers) 6063 return -EINVAL; 6064 if (!mddev->ro) 6065 return -EBUSY; 6066 6067 rcu_read_lock(); 6068 rdev_for_each_rcu(rdev, mddev) { 6069 if (test_bit(Journal, &rdev->flags) && 6070 !test_bit(Faulty, &rdev->flags)) 6071 has_journal = true; 6072 if (bdev_read_only(rdev->bdev)) 6073 has_readonly = true; 6074 } 6075 rcu_read_unlock(); 6076 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) 6077 /* Don't restart rw with journal missing/faulty */ 6078 return -EINVAL; 6079 if (has_readonly) 6080 return -EROFS; 6081 6082 mddev->safemode = 0; 6083 mddev->ro = 0; 6084 set_disk_ro(disk, 0); 6085 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 6086 /* Kick recovery or resync if necessary */ 6087 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6088 md_wakeup_thread(mddev->thread); 6089 md_wakeup_thread(mddev->sync_thread); 6090 sysfs_notify_dirent_safe(mddev->sysfs_state); 6091 return 0; 6092 } 6093 6094 static void md_clean(struct mddev *mddev) 6095 { 6096 mddev->array_sectors = 0; 6097 mddev->external_size = 
0; 6098 mddev->dev_sectors = 0; 6099 mddev->raid_disks = 0; 6100 mddev->recovery_cp = 0; 6101 mddev->resync_min = 0; 6102 mddev->resync_max = MaxSector; 6103 mddev->reshape_position = MaxSector; 6104 mddev->external = 0; 6105 mddev->persistent = 0; 6106 mddev->level = LEVEL_NONE; 6107 mddev->clevel[0] = 0; 6108 mddev->flags = 0; 6109 mddev->sb_flags = 0; 6110 mddev->ro = 0; 6111 mddev->metadata_type[0] = 0; 6112 mddev->chunk_sectors = 0; 6113 mddev->ctime = mddev->utime = 0; 6114 mddev->layout = 0; 6115 mddev->max_disks = 0; 6116 mddev->events = 0; 6117 mddev->can_decrease_events = 0; 6118 mddev->delta_disks = 0; 6119 mddev->reshape_backwards = 0; 6120 mddev->new_level = LEVEL_NONE; 6121 mddev->new_layout = 0; 6122 mddev->new_chunk_sectors = 0; 6123 mddev->curr_resync = 0; 6124 atomic64_set(&mddev->resync_mismatches, 0); 6125 mddev->suspend_lo = mddev->suspend_hi = 0; 6126 mddev->sync_speed_min = mddev->sync_speed_max = 0; 6127 mddev->recovery = 0; 6128 mddev->in_sync = 0; 6129 mddev->changed = 0; 6130 mddev->degraded = 0; 6131 mddev->safemode = 0; 6132 mddev->private = NULL; 6133 mddev->cluster_info = NULL; 6134 mddev->bitmap_info.offset = 0; 6135 mddev->bitmap_info.default_offset = 0; 6136 mddev->bitmap_info.default_space = 0; 6137 mddev->bitmap_info.chunksize = 0; 6138 mddev->bitmap_info.daemon_sleep = 0; 6139 mddev->bitmap_info.max_write_behind = 0; 6140 mddev->bitmap_info.nodes = 0; 6141 } 6142 6143 static void __md_stop_writes(struct mddev *mddev) 6144 { 6145 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6146 if (work_pending(&mddev->del_work)) 6147 flush_workqueue(md_misc_wq); 6148 if (mddev->sync_thread) { 6149 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6150 md_reap_sync_thread(mddev); 6151 } 6152 6153 del_timer_sync(&mddev->safemode_timer); 6154 6155 if (mddev->pers && mddev->pers->quiesce) { 6156 mddev->pers->quiesce(mddev, 1); 6157 mddev->pers->quiesce(mddev, 0); 6158 } 6159 md_bitmap_flush(mddev); 6160 6161 if (mddev->ro == 0 && 6162 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 6163 mddev->sb_flags)) { 6164 /* mark array as shutdown cleanly */ 6165 if (!mddev_is_clustered(mddev)) 6166 mddev->in_sync = 1; 6167 md_update_sb(mddev, 1); 6168 } 6169 /* disable policy to guarantee rdevs free resources for serialization */ 6170 mddev->serialize_policy = 0; 6171 mddev_destroy_serial_pool(mddev, NULL, true); 6172 } 6173 6174 void md_stop_writes(struct mddev *mddev) 6175 { 6176 mddev_lock_nointr(mddev); 6177 __md_stop_writes(mddev); 6178 mddev_unlock(mddev); 6179 } 6180 EXPORT_SYMBOL_GPL(md_stop_writes); 6181 6182 static void mddev_detach(struct mddev *mddev) 6183 { 6184 md_bitmap_wait_behind_writes(mddev); 6185 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { 6186 mddev->pers->quiesce(mddev, 1); 6187 mddev->pers->quiesce(mddev, 0); 6188 } 6189 md_unregister_thread(&mddev->thread); 6190 if (mddev->queue) 6191 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 6192 } 6193 6194 static void __md_stop(struct mddev *mddev) 6195 { 6196 struct md_personality *pers = mddev->pers; 6197 md_bitmap_destroy(mddev); 6198 mddev_detach(mddev); 6199 /* Ensure ->event_work is done */ 6200 if (mddev->event_work.func) 6201 flush_workqueue(md_misc_wq); 6202 spin_lock(&mddev->lock); 6203 mddev->pers = NULL; 6204 spin_unlock(&mddev->lock); 6205 pers->free(mddev, mddev->private); 6206 mddev->private = NULL; 6207 if (pers->sync_request && mddev->to_remove == NULL) 6208 mddev->to_remove = &md_redundancy_group; 6209 module_put(pers->owner); 6210 
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6211 } 6212 6213 void md_stop(struct mddev *mddev) 6214 { 6215 /* stop the array and free any attached data structures. 6216 * This is called from dm-raid 6217 */ 6218 __md_stop(mddev); 6219 bioset_exit(&mddev->bio_set); 6220 bioset_exit(&mddev->sync_set); 6221 } 6222 6223 EXPORT_SYMBOL_GPL(md_stop); 6224 6225 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 6226 { 6227 int err = 0; 6228 int did_freeze = 0; 6229 6230 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6231 did_freeze = 1; 6232 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6233 md_wakeup_thread(mddev->thread); 6234 } 6235 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6236 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6237 if (mddev->sync_thread) 6238 /* Thread might be blocked waiting for metadata update 6239 * which will now never happen */ 6240 wake_up_process(mddev->sync_thread->tsk); 6241 6242 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 6243 return -EBUSY; 6244 mddev_unlock(mddev); 6245 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 6246 &mddev->recovery)); 6247 wait_event(mddev->sb_wait, 6248 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 6249 mddev_lock_nointr(mddev); 6250 6251 mutex_lock(&mddev->open_mutex); 6252 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6253 mddev->sync_thread || 6254 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6255 pr_warn("md: %s still in use.\n", mdname(mddev)); 6256 if (did_freeze) { 6257 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6258 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6259 md_wakeup_thread(mddev->thread); 6260 } 6261 err = -EBUSY; 6262 goto out; 6263 } 6264 if (mddev->pers) { 6265 __md_stop_writes(mddev); 6266 6267 err = -ENXIO; 6268 if (mddev->ro == 1) 6269 goto out; 6270 mddev->ro = 1; 6271 set_disk_ro(mddev->gendisk, 1); 6272 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6273 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6274 md_wakeup_thread(mddev->thread); 6275 sysfs_notify_dirent_safe(mddev->sysfs_state); 6276 err = 0; 6277 } 6278 out: 6279 mutex_unlock(&mddev->open_mutex); 6280 return err; 6281 } 6282 6283 /* mode: 6284 * 0 - completely stop and disassemble array 6285 * 2 - stop but do not disassemble array 6286 */ 6287 static int do_md_stop(struct mddev *mddev, int mode, 6288 struct block_device *bdev) 6289 { 6290 struct gendisk *disk = mddev->gendisk; 6291 struct md_rdev *rdev; 6292 int did_freeze = 0; 6293 6294 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6295 did_freeze = 1; 6296 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6297 md_wakeup_thread(mddev->thread); 6298 } 6299 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6300 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6301 if (mddev->sync_thread) 6302 /* Thread might be blocked waiting for metadata update 6303 * which will now never happen */ 6304 wake_up_process(mddev->sync_thread->tsk); 6305 6306 mddev_unlock(mddev); 6307 wait_event(resync_wait, (mddev->sync_thread == NULL && 6308 !test_bit(MD_RECOVERY_RUNNING, 6309 &mddev->recovery))); 6310 mddev_lock_nointr(mddev); 6311 6312 mutex_lock(&mddev->open_mutex); 6313 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6314 mddev->sysfs_active || 6315 mddev->sync_thread || 6316 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6317 pr_warn("md: %s still in use.\n", mdname(mddev)); 6318 mutex_unlock(&mddev->open_mutex); 6319 if (did_freeze) { 6320 clear_bit(MD_RECOVERY_FROZEN,
&mddev->recovery); 6321 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6322 md_wakeup_thread(mddev->thread); 6323 } 6324 return -EBUSY; 6325 } 6326 if (mddev->pers) { 6327 if (mddev->ro) 6328 set_disk_ro(disk, 0); 6329 6330 __md_stop_writes(mddev); 6331 __md_stop(mddev); 6332 6333 /* tell userspace to handle 'inactive' */ 6334 sysfs_notify_dirent_safe(mddev->sysfs_state); 6335 6336 rdev_for_each(rdev, mddev) 6337 if (rdev->raid_disk >= 0) 6338 sysfs_unlink_rdev(mddev, rdev); 6339 6340 set_capacity(disk, 0); 6341 mutex_unlock(&mddev->open_mutex); 6342 mddev->changed = 1; 6343 revalidate_disk(disk); 6344 6345 if (mddev->ro) 6346 mddev->ro = 0; 6347 } else 6348 mutex_unlock(&mddev->open_mutex); 6349 /* 6350 * Free resources if final stop 6351 */ 6352 if (mode == 0) { 6353 pr_info("md: %s stopped.\n", mdname(mddev)); 6354 6355 if (mddev->bitmap_info.file) { 6356 struct file *f = mddev->bitmap_info.file; 6357 spin_lock(&mddev->lock); 6358 mddev->bitmap_info.file = NULL; 6359 spin_unlock(&mddev->lock); 6360 fput(f); 6361 } 6362 mddev->bitmap_info.offset = 0; 6363 6364 export_array(mddev); 6365 6366 md_clean(mddev); 6367 if (mddev->hold_active == UNTIL_STOP) 6368 mddev->hold_active = 0; 6369 } 6370 md_new_event(mddev); 6371 sysfs_notify_dirent_safe(mddev->sysfs_state); 6372 return 0; 6373 } 6374 6375 #ifndef MODULE 6376 static void autorun_array(struct mddev *mddev) 6377 { 6378 struct md_rdev *rdev; 6379 int err; 6380 6381 if (list_empty(&mddev->disks)) 6382 return; 6383 6384 pr_info("md: running: "); 6385 6386 rdev_for_each(rdev, mddev) { 6387 char b[BDEVNAME_SIZE]; 6388 pr_cont("<%s>", bdevname(rdev->bdev,b)); 6389 } 6390 pr_cont("\n"); 6391 6392 err = do_md_run(mddev); 6393 if (err) { 6394 pr_warn("md: do_md_run() returned %d\n", err); 6395 do_md_stop(mddev, 0, NULL); 6396 } 6397 } 6398 6399 /* 6400 * lets try to run arrays based on all disks that have arrived 6401 * until now. (those are in pending_raid_disks) 6402 * 6403 * the method: pick the first pending disk, collect all disks with 6404 * the same UUID, remove all from the pending list and put them into 6405 * the 'same_array' list. Then order this list based on superblock 6406 * update time (freshest comes first), kick out 'old' disks and 6407 * compare superblocks. If everything's fine then run it. 6408 * 6409 * If "unit" is allocated, then bump its reference count 6410 */ 6411 static void autorun_devices(int part) 6412 { 6413 struct md_rdev *rdev0, *rdev, *tmp; 6414 struct mddev *mddev; 6415 char b[BDEVNAME_SIZE]; 6416 6417 pr_info("md: autorun ...\n"); 6418 while (!list_empty(&pending_raid_disks)) { 6419 int unit; 6420 dev_t dev; 6421 LIST_HEAD(candidates); 6422 rdev0 = list_entry(pending_raid_disks.next, 6423 struct md_rdev, same_set); 6424 6425 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b)); 6426 INIT_LIST_HEAD(&candidates); 6427 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 6428 if (super_90_load(rdev, rdev0, 0) >= 0) { 6429 pr_debug("md: adding %s ...\n", 6430 bdevname(rdev->bdev,b)); 6431 list_move(&rdev->same_set, &candidates); 6432 } 6433 /* 6434 * now we have a set of devices, with all of them having 6435 * mostly sane superblocks. It's time to allocate the 6436 * mddev. 
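 * (Illustrative: for a partitionable array the preferred minor is
 * shifted left by MdpMinorShift below, reserving a block of minors
 * for the array and its partitions; e.g. unit 2 maps to minor 128
 * when the shift is the customary 6.)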
6437 */ 6438 if (part) { 6439 dev = MKDEV(mdp_major, 6440 rdev0->preferred_minor << MdpMinorShift); 6441 unit = MINOR(dev) >> MdpMinorShift; 6442 } else { 6443 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 6444 unit = MINOR(dev); 6445 } 6446 if (rdev0->preferred_minor != unit) { 6447 pr_warn("md: unit number in %s is bad: %d\n", 6448 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 6449 break; 6450 } 6451 6452 md_probe(dev, NULL, NULL); 6453 mddev = mddev_find(dev); 6454 if (!mddev || !mddev->gendisk) { 6455 if (mddev) 6456 mddev_put(mddev); 6457 break; 6458 } 6459 if (mddev_lock(mddev)) 6460 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 6461 else if (mddev->raid_disks || mddev->major_version 6462 || !list_empty(&mddev->disks)) { 6463 pr_warn("md: %s already running, cannot run %s\n", 6464 mdname(mddev), bdevname(rdev0->bdev,b)); 6465 mddev_unlock(mddev); 6466 } else { 6467 pr_debug("md: created %s\n", mdname(mddev)); 6468 mddev->persistent = 1; 6469 rdev_for_each_list(rdev, tmp, &candidates) { 6470 list_del_init(&rdev->same_set); 6471 if (bind_rdev_to_array(rdev, mddev)) 6472 export_rdev(rdev); 6473 } 6474 autorun_array(mddev); 6475 mddev_unlock(mddev); 6476 } 6477 /* on success, candidates will be empty, on error 6478 * it won't... 6479 */ 6480 rdev_for_each_list(rdev, tmp, &candidates) { 6481 list_del_init(&rdev->same_set); 6482 export_rdev(rdev); 6483 } 6484 mddev_put(mddev); 6485 } 6486 pr_info("md: ... autorun DONE.\n"); 6487 } 6488 #endif /* !MODULE */ 6489 6490 static int get_version(void __user *arg) 6491 { 6492 mdu_version_t ver; 6493 6494 ver.major = MD_MAJOR_VERSION; 6495 ver.minor = MD_MINOR_VERSION; 6496 ver.patchlevel = MD_PATCHLEVEL_VERSION; 6497 6498 if (copy_to_user(arg, &ver, sizeof(ver))) 6499 return -EFAULT; 6500 6501 return 0; 6502 } 6503 6504 static int get_array_info(struct mddev *mddev, void __user *arg) 6505 { 6506 mdu_array_info_t info; 6507 int nr,working,insync,failed,spare; 6508 struct md_rdev *rdev; 6509 6510 nr = working = insync = failed = spare = 0; 6511 rcu_read_lock(); 6512 rdev_for_each_rcu(rdev, mddev) { 6513 nr++; 6514 if (test_bit(Faulty, &rdev->flags)) 6515 failed++; 6516 else { 6517 working++; 6518 if (test_bit(In_sync, &rdev->flags)) 6519 insync++; 6520 else if (test_bit(Journal, &rdev->flags)) 6521 /* TODO: add journal count to md_u.h */ 6522 ; 6523 else 6524 spare++; 6525 } 6526 } 6527 rcu_read_unlock(); 6528 6529 info.major_version = mddev->major_version; 6530 info.minor_version = mddev->minor_version; 6531 info.patch_version = MD_PATCHLEVEL_VERSION; 6532 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 6533 info.level = mddev->level; 6534 info.size = mddev->dev_sectors / 2; 6535 if (info.size != mddev->dev_sectors / 2) /* overflow */ 6536 info.size = -1; 6537 info.nr_disks = nr; 6538 info.raid_disks = mddev->raid_disks; 6539 info.md_minor = mddev->md_minor; 6540 info.not_persistent= !mddev->persistent; 6541 6542 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 6543 info.state = 0; 6544 if (mddev->in_sync) 6545 info.state = (1<<MD_SB_CLEAN); 6546 if (mddev->bitmap && mddev->bitmap_info.offset) 6547 info.state |= (1<<MD_SB_BITMAP_PRESENT); 6548 if (mddev_is_clustered(mddev)) 6549 info.state |= (1<<MD_SB_CLUSTERED); 6550 info.active_disks = insync; 6551 info.working_disks = working; 6552 info.failed_disks = failed; 6553 info.spare_disks = spare; 6554 6555 info.layout = mddev->layout; 6556 info.chunk_size = mddev->chunk_sectors << 9; 6557 6558 if (copy_to_user(arg, &info, sizeof(info))) 6559 return -EFAULT; 6560 6561 
return 0; 6562 } 6563 6564 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 6565 { 6566 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 6567 char *ptr; 6568 int err; 6569 6570 file = kzalloc(sizeof(*file), GFP_NOIO); 6571 if (!file) 6572 return -ENOMEM; 6573 6574 err = 0; 6575 spin_lock(&mddev->lock); 6576 /* bitmap enabled */ 6577 if (mddev->bitmap_info.file) { 6578 ptr = file_path(mddev->bitmap_info.file, file->pathname, 6579 sizeof(file->pathname)); 6580 if (IS_ERR(ptr)) 6581 err = PTR_ERR(ptr); 6582 else 6583 memmove(file->pathname, ptr, 6584 sizeof(file->pathname)-(ptr-file->pathname)); 6585 } 6586 spin_unlock(&mddev->lock); 6587 6588 if (err == 0 && 6589 copy_to_user(arg, file, sizeof(*file))) 6590 err = -EFAULT; 6591 6592 kfree(file); 6593 return err; 6594 } 6595 6596 static int get_disk_info(struct mddev *mddev, void __user * arg) 6597 { 6598 mdu_disk_info_t info; 6599 struct md_rdev *rdev; 6600 6601 if (copy_from_user(&info, arg, sizeof(info))) 6602 return -EFAULT; 6603 6604 rcu_read_lock(); 6605 rdev = md_find_rdev_nr_rcu(mddev, info.number); 6606 if (rdev) { 6607 info.major = MAJOR(rdev->bdev->bd_dev); 6608 info.minor = MINOR(rdev->bdev->bd_dev); 6609 info.raid_disk = rdev->raid_disk; 6610 info.state = 0; 6611 if (test_bit(Faulty, &rdev->flags)) 6612 info.state |= (1<<MD_DISK_FAULTY); 6613 else if (test_bit(In_sync, &rdev->flags)) { 6614 info.state |= (1<<MD_DISK_ACTIVE); 6615 info.state |= (1<<MD_DISK_SYNC); 6616 } 6617 if (test_bit(Journal, &rdev->flags)) 6618 info.state |= (1<<MD_DISK_JOURNAL); 6619 if (test_bit(WriteMostly, &rdev->flags)) 6620 info.state |= (1<<MD_DISK_WRITEMOSTLY); 6621 if (test_bit(FailFast, &rdev->flags)) 6622 info.state |= (1<<MD_DISK_FAILFAST); 6623 } else { 6624 info.major = info.minor = 0; 6625 info.raid_disk = -1; 6626 info.state = (1<<MD_DISK_REMOVED); 6627 } 6628 rcu_read_unlock(); 6629 6630 if (copy_to_user(arg, &info, sizeof(info))) 6631 return -EFAULT; 6632 6633 return 0; 6634 } 6635 6636 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) 6637 { 6638 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 6639 struct md_rdev *rdev; 6640 dev_t dev = MKDEV(info->major,info->minor); 6641 6642 if (mddev_is_clustered(mddev) && 6643 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 6644 pr_warn("%s: Cannot add to clustered mddev.\n", 6645 mdname(mddev)); 6646 return -EINVAL; 6647 } 6648 6649 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 6650 return -EOVERFLOW; 6651 6652 if (!mddev->raid_disks) { 6653 int err; 6654 /* expecting a device which has a superblock */ 6655 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 6656 if (IS_ERR(rdev)) { 6657 pr_warn("md: md_import_device returned %ld\n", 6658 PTR_ERR(rdev)); 6659 return PTR_ERR(rdev); 6660 } 6661 if (!list_empty(&mddev->disks)) { 6662 struct md_rdev *rdev0 6663 = list_entry(mddev->disks.next, 6664 struct md_rdev, same_set); 6665 err = super_types[mddev->major_version] 6666 .load_super(rdev, rdev0, mddev->minor_version); 6667 if (err < 0) { 6668 pr_warn("md: %s has different UUID to %s\n", 6669 bdevname(rdev->bdev,b), 6670 bdevname(rdev0->bdev,b2)); 6671 export_rdev(rdev); 6672 return -EINVAL; 6673 } 6674 } 6675 err = bind_rdev_to_array(rdev, mddev); 6676 if (err) 6677 export_rdev(rdev); 6678 return err; 6679 } 6680 6681 /* 6682 * add_new_disk can be used once the array is assembled 6683 * to add "hot spares". 
They must already have a superblock 6684 * written 6685 */ 6686 if (mddev->pers) { 6687 int err; 6688 if (!mddev->pers->hot_add_disk) { 6689 pr_warn("%s: personality does not support diskops!\n", 6690 mdname(mddev)); 6691 return -EINVAL; 6692 } 6693 if (mddev->persistent) 6694 rdev = md_import_device(dev, mddev->major_version, 6695 mddev->minor_version); 6696 else 6697 rdev = md_import_device(dev, -1, -1); 6698 if (IS_ERR(rdev)) { 6699 pr_warn("md: md_import_device returned %ld\n", 6700 PTR_ERR(rdev)); 6701 return PTR_ERR(rdev); 6702 } 6703 /* set saved_raid_disk if appropriate */ 6704 if (!mddev->persistent) { 6705 if (info->state & (1<<MD_DISK_SYNC) && 6706 info->raid_disk < mddev->raid_disks) { 6707 rdev->raid_disk = info->raid_disk; 6708 set_bit(In_sync, &rdev->flags); 6709 clear_bit(Bitmap_sync, &rdev->flags); 6710 } else 6711 rdev->raid_disk = -1; 6712 rdev->saved_raid_disk = rdev->raid_disk; 6713 } else 6714 super_types[mddev->major_version]. 6715 validate_super(mddev, rdev); 6716 if ((info->state & (1<<MD_DISK_SYNC)) && 6717 rdev->raid_disk != info->raid_disk) { 6718 /* This was a hot-add request, but events doesn't 6719 * match, so reject it. 6720 */ 6721 export_rdev(rdev); 6722 return -EINVAL; 6723 } 6724 6725 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 6726 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6727 set_bit(WriteMostly, &rdev->flags); 6728 else 6729 clear_bit(WriteMostly, &rdev->flags); 6730 if (info->state & (1<<MD_DISK_FAILFAST)) 6731 set_bit(FailFast, &rdev->flags); 6732 else 6733 clear_bit(FailFast, &rdev->flags); 6734 6735 if (info->state & (1<<MD_DISK_JOURNAL)) { 6736 struct md_rdev *rdev2; 6737 bool has_journal = false; 6738 6739 /* make sure no existing journal disk */ 6740 rdev_for_each(rdev2, mddev) { 6741 if (test_bit(Journal, &rdev2->flags)) { 6742 has_journal = true; 6743 break; 6744 } 6745 } 6746 if (has_journal || mddev->bitmap) { 6747 export_rdev(rdev); 6748 return -EBUSY; 6749 } 6750 set_bit(Journal, &rdev->flags); 6751 } 6752 /* 6753 * check whether the device shows up in other nodes 6754 */ 6755 if (mddev_is_clustered(mddev)) { 6756 if (info->state & (1 << MD_DISK_CANDIDATE)) 6757 set_bit(Candidate, &rdev->flags); 6758 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 6759 /* --add initiated by this node */ 6760 err = md_cluster_ops->add_new_disk(mddev, rdev); 6761 if (err) { 6762 export_rdev(rdev); 6763 return err; 6764 } 6765 } 6766 } 6767 6768 rdev->raid_disk = -1; 6769 err = bind_rdev_to_array(rdev, mddev); 6770 6771 if (err) 6772 export_rdev(rdev); 6773 6774 if (mddev_is_clustered(mddev)) { 6775 if (info->state & (1 << MD_DISK_CANDIDATE)) { 6776 if (!err) { 6777 err = md_cluster_ops->new_disk_ack(mddev, 6778 err == 0); 6779 if (err) 6780 md_kick_rdev_from_array(rdev); 6781 } 6782 } else { 6783 if (err) 6784 md_cluster_ops->add_new_disk_cancel(mddev); 6785 else 6786 err = add_bound_rdev(rdev); 6787 } 6788 6789 } else if (!err) 6790 err = add_bound_rdev(rdev); 6791 6792 return err; 6793 } 6794 6795 /* otherwise, add_new_disk is only allowed 6796 * for major_version==0 superblocks 6797 */ 6798 if (mddev->major_version != 0) { 6799 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 6800 return -EINVAL; 6801 } 6802 6803 if (!(info->state & (1<<MD_DISK_FAULTY))) { 6804 int err; 6805 rdev = md_import_device(dev, -1, 0); 6806 if (IS_ERR(rdev)) { 6807 pr_warn("md: error, md_import_device() returned %ld\n", 6808 PTR_ERR(rdev)); 6809 return PTR_ERR(rdev); 6810 } 6811 rdev->desc_nr = info->number; 6812 if (info->raid_disk < 
mddev->raid_disks) 6813 rdev->raid_disk = info->raid_disk; 6814 else 6815 rdev->raid_disk = -1; 6816 6817 if (rdev->raid_disk < mddev->raid_disks) 6818 if (info->state & (1<<MD_DISK_SYNC)) 6819 set_bit(In_sync, &rdev->flags); 6820 6821 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6822 set_bit(WriteMostly, &rdev->flags); 6823 if (info->state & (1<<MD_DISK_FAILFAST)) 6824 set_bit(FailFast, &rdev->flags); 6825 6826 if (!mddev->persistent) { 6827 pr_debug("md: nonpersistent superblock ...\n"); 6828 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6829 } else 6830 rdev->sb_start = calc_dev_sboffset(rdev); 6831 rdev->sectors = rdev->sb_start; 6832 6833 err = bind_rdev_to_array(rdev, mddev); 6834 if (err) { 6835 export_rdev(rdev); 6836 return err; 6837 } 6838 } 6839 6840 return 0; 6841 } 6842 6843 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6844 { 6845 char b[BDEVNAME_SIZE]; 6846 struct md_rdev *rdev; 6847 6848 if (!mddev->pers) 6849 return -ENODEV; 6850 6851 rdev = find_rdev(mddev, dev); 6852 if (!rdev) 6853 return -ENXIO; 6854 6855 if (rdev->raid_disk < 0) 6856 goto kick_rdev; 6857 6858 clear_bit(Blocked, &rdev->flags); 6859 remove_and_add_spares(mddev, rdev); 6860 6861 if (rdev->raid_disk >= 0) 6862 goto busy; 6863 6864 kick_rdev: 6865 if (mddev_is_clustered(mddev)) 6866 md_cluster_ops->remove_disk(mddev, rdev); 6867 6868 md_kick_rdev_from_array(rdev); 6869 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6870 if (mddev->thread) 6871 md_wakeup_thread(mddev->thread); 6872 else 6873 md_update_sb(mddev, 1); 6874 md_new_event(mddev); 6875 6876 return 0; 6877 busy: 6878 pr_debug("md: cannot remove active disk %s from %s ...\n", 6879 bdevname(rdev->bdev,b), mdname(mddev)); 6880 return -EBUSY; 6881 } 6882 6883 static int hot_add_disk(struct mddev *mddev, dev_t dev) 6884 { 6885 char b[BDEVNAME_SIZE]; 6886 int err; 6887 struct md_rdev *rdev; 6888 6889 if (!mddev->pers) 6890 return -ENODEV; 6891 6892 if (mddev->major_version != 0) { 6893 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 6894 mdname(mddev)); 6895 return -EINVAL; 6896 } 6897 if (!mddev->pers->hot_add_disk) { 6898 pr_warn("%s: personality does not support diskops!\n", 6899 mdname(mddev)); 6900 return -EINVAL; 6901 } 6902 6903 rdev = md_import_device(dev, -1, 0); 6904 if (IS_ERR(rdev)) { 6905 pr_warn("md: error, md_import_device() returned %ld\n", 6906 PTR_ERR(rdev)); 6907 return -EINVAL; 6908 } 6909 6910 if (mddev->persistent) 6911 rdev->sb_start = calc_dev_sboffset(rdev); 6912 else 6913 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6914 6915 rdev->sectors = rdev->sb_start; 6916 6917 if (test_bit(Faulty, &rdev->flags)) { 6918 pr_warn("md: can not hot-add faulty %s disk to %s!\n", 6919 bdevname(rdev->bdev,b), mdname(mddev)); 6920 err = -EINVAL; 6921 goto abort_export; 6922 } 6923 6924 clear_bit(In_sync, &rdev->flags); 6925 rdev->desc_nr = -1; 6926 rdev->saved_raid_disk = -1; 6927 err = bind_rdev_to_array(rdev, mddev); 6928 if (err) 6929 goto abort_export; 6930 6931 /* 6932 * The rest should better be atomic, we can have disk failures 6933 * noticed in interrupt contexts ... 6934 */ 6935 6936 rdev->raid_disk = -1; 6937 6938 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6939 if (!mddev->thread) 6940 md_update_sb(mddev, 1); 6941 /* 6942 * Kick recovery, maybe this spare has to be added to the 6943 * array immediately. 
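 * (Setting MD_RECOVERY_NEEDED below makes the next pass of the
 * recovery logic consider the new spare.)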
6944 */ 6945 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6946 md_wakeup_thread(mddev->thread); 6947 md_new_event(mddev); 6948 return 0; 6949 6950 abort_export: 6951 export_rdev(rdev); 6952 return err; 6953 } 6954 6955 static int set_bitmap_file(struct mddev *mddev, int fd) 6956 { 6957 int err = 0; 6958 6959 if (mddev->pers) { 6960 if (!mddev->pers->quiesce || !mddev->thread) 6961 return -EBUSY; 6962 if (mddev->recovery || mddev->sync_thread) 6963 return -EBUSY; 6964 /* we should be able to change the bitmap... */ 6965 } 6966 6967 if (fd >= 0) { 6968 struct inode *inode; 6969 struct file *f; 6970 6971 if (mddev->bitmap || mddev->bitmap_info.file) 6972 return -EEXIST; /* cannot add when bitmap is present */ 6973 f = fget(fd); 6974 6975 if (f == NULL) { 6976 pr_warn("%s: error: failed to get bitmap file\n", 6977 mdname(mddev)); 6978 return -EBADF; 6979 } 6980 6981 inode = f->f_mapping->host; 6982 if (!S_ISREG(inode->i_mode)) { 6983 pr_warn("%s: error: bitmap file must be a regular file\n", 6984 mdname(mddev)); 6985 err = -EBADF; 6986 } else if (!(f->f_mode & FMODE_WRITE)) { 6987 pr_warn("%s: error: bitmap file must be opened for write\n", 6988 mdname(mddev)); 6989 err = -EBADF; 6990 } else if (atomic_read(&inode->i_writecount) != 1) { 6991 pr_warn("%s: error: bitmap file is already in use\n", 6992 mdname(mddev)); 6993 err = -EBUSY; 6994 } 6995 if (err) { 6996 fput(f); 6997 return err; 6998 } 6999 mddev->bitmap_info.file = f; 7000 mddev->bitmap_info.offset = 0; /* file overrides offset */ 7001 } else if (mddev->bitmap == NULL) 7002 return -ENOENT; /* cannot remove what isn't there */ 7003 err = 0; 7004 if (mddev->pers) { 7005 if (fd >= 0) { 7006 struct bitmap *bitmap; 7007 7008 bitmap = md_bitmap_create(mddev, -1); 7009 mddev_suspend(mddev); 7010 if (!IS_ERR(bitmap)) { 7011 mddev->bitmap = bitmap; 7012 err = md_bitmap_load(mddev); 7013 } else 7014 err = PTR_ERR(bitmap); 7015 if (err) { 7016 md_bitmap_destroy(mddev); 7017 fd = -1; 7018 } 7019 mddev_resume(mddev); 7020 } else if (fd < 0) { 7021 mddev_suspend(mddev); 7022 md_bitmap_destroy(mddev); 7023 mddev_resume(mddev); 7024 } 7025 } 7026 if (fd < 0) { 7027 struct file *f = mddev->bitmap_info.file; 7028 if (f) { 7029 spin_lock(&mddev->lock); 7030 mddev->bitmap_info.file = NULL; 7031 spin_unlock(&mddev->lock); 7032 fput(f); 7033 } 7034 } 7035 7036 return err; 7037 } 7038 7039 /* 7040 * set_array_info is used in two different ways. 7041 * The original usage is when creating a new array. 7042 * In this usage, raid_disks is > 0 and it together with 7043 * level, size, not_persistent, layout and chunksize determine the 7044 * shape of the array. 7045 * This will always create an array with a type-0.90.0 superblock. 7046 * The newer usage is when assembling an array. 7047 * In this case raid_disks will be 0, and the major_version field is 7048 * used to determine which style super-blocks are to be found on the devices. 7049 * The minor and patch _version numbers are also kept in case the 7050 * super_block handler wishes to interpret them. 7051 */ 7052 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) 7053 { 7054 7055 if (info->raid_disks == 0) { 7056 /* just setting version number for superblock loading */ 7057 if (info->major_version < 0 || 7058 info->major_version >= ARRAY_SIZE(super_types) || 7059 super_types[info->major_version].name == NULL) { 7060 /* maybe try to auto-load a module?
*/ 7061 pr_warn("md: superblock version %d not known\n", 7062 info->major_version); 7063 return -EINVAL; 7064 } 7065 mddev->major_version = info->major_version; 7066 mddev->minor_version = info->minor_version; 7067 mddev->patch_version = info->patch_version; 7068 mddev->persistent = !info->not_persistent; 7069 /* ensure mddev_put doesn't delete this now that there 7070 * is some minimal configuration. 7071 */ 7072 mddev->ctime = ktime_get_real_seconds(); 7073 return 0; 7074 } 7075 mddev->major_version = MD_MAJOR_VERSION; 7076 mddev->minor_version = MD_MINOR_VERSION; 7077 mddev->patch_version = MD_PATCHLEVEL_VERSION; 7078 mddev->ctime = ktime_get_real_seconds(); 7079 7080 mddev->level = info->level; 7081 mddev->clevel[0] = 0; 7082 mddev->dev_sectors = 2 * (sector_t)info->size; 7083 mddev->raid_disks = info->raid_disks; 7084 /* don't set md_minor; it is determined by which /dev/md* was 7085 * opened 7086 */ 7087 if (info->state & (1<<MD_SB_CLEAN)) 7088 mddev->recovery_cp = MaxSector; 7089 else 7090 mddev->recovery_cp = 0; 7091 mddev->persistent = !info->not_persistent; 7092 mddev->external = 0; 7093 7094 mddev->layout = info->layout; 7095 if (mddev->level == 0) 7096 /* Cannot trust RAID0 layout info here */ 7097 mddev->layout = -1; 7098 mddev->chunk_sectors = info->chunk_size >> 9; 7099 7100 if (mddev->persistent) { 7101 mddev->max_disks = MD_SB_DISKS; 7102 mddev->flags = 0; 7103 mddev->sb_flags = 0; 7104 } 7105 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7106 7107 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 7108 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 7109 mddev->bitmap_info.offset = 0; 7110 7111 mddev->reshape_position = MaxSector; 7112 7113 /* 7114 * Generate a 128-bit UUID 7115 */ 7116 get_random_bytes(mddev->uuid, 16); 7117 7118 mddev->new_level = mddev->level; 7119 mddev->new_chunk_sectors = mddev->chunk_sectors; 7120 mddev->new_layout = mddev->layout; 7121 mddev->delta_disks = 0; 7122 mddev->reshape_backwards = 0; 7123 7124 return 0; 7125 } 7126 7127 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 7128 { 7129 lockdep_assert_held(&mddev->reconfig_mutex); 7130 7131 if (mddev->external_size) 7132 return; 7133 7134 mddev->array_sectors = array_sectors; 7135 } 7136 EXPORT_SYMBOL(md_set_array_sectors); 7137 7138 static int update_size(struct mddev *mddev, sector_t num_sectors) 7139 { 7140 struct md_rdev *rdev; 7141 int rv; 7142 int fit = (num_sectors == 0); 7143 sector_t old_dev_sectors = mddev->dev_sectors; 7144 7145 if (mddev->pers->resize == NULL) 7146 return -EINVAL; 7147 /* The "num_sectors" is the number of sectors of each device that 7148 * is used. This can only make sense for arrays with redundancy. 7149 * linear and raid0 always use whatever space is available. We can only 7150 * consider changing this number if no resync or reconstruction is 7151 * happening, and if the new size is acceptable. It must fit before the 7152 * sb_start or, if that is <data_offset, it must fit before the size 7153 * of each device. If num_sectors is zero, we find the largest size 7154 * that fits.
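 * e.g. if the smallest member rdev has N sectors available,
 * num_sectors == 0 resizes every device to use N sectors, while any
 * request larger than N fails with -ENOSPC (see the loop below).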
7155 */ 7156 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7157 mddev->sync_thread) 7158 return -EBUSY; 7159 if (mddev->ro) 7160 return -EROFS; 7161 7162 rdev_for_each(rdev, mddev) { 7163 sector_t avail = rdev->sectors; 7164 7165 if (fit && (num_sectors == 0 || num_sectors > avail)) 7166 num_sectors = avail; 7167 if (avail < num_sectors) 7168 return -ENOSPC; 7169 } 7170 rv = mddev->pers->resize(mddev, num_sectors); 7171 if (!rv) { 7172 if (mddev_is_clustered(mddev)) 7173 md_cluster_ops->update_size(mddev, old_dev_sectors); 7174 else if (mddev->queue) { 7175 set_capacity(mddev->gendisk, mddev->array_sectors); 7176 revalidate_disk(mddev->gendisk); 7177 } 7178 } 7179 return rv; 7180 } 7181 7182 static int update_raid_disks(struct mddev *mddev, int raid_disks) 7183 { 7184 int rv; 7185 struct md_rdev *rdev; 7186 /* change the number of raid disks */ 7187 if (mddev->pers->check_reshape == NULL) 7188 return -EINVAL; 7189 if (mddev->ro) 7190 return -EROFS; 7191 if (raid_disks <= 0 || 7192 (mddev->max_disks && raid_disks >= mddev->max_disks)) 7193 return -EINVAL; 7194 if (mddev->sync_thread || 7195 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7196 mddev->reshape_position != MaxSector) 7197 return -EBUSY; 7198 7199 rdev_for_each(rdev, mddev) { 7200 if (mddev->raid_disks < raid_disks && 7201 rdev->data_offset < rdev->new_data_offset) 7202 return -EINVAL; 7203 if (mddev->raid_disks > raid_disks && 7204 rdev->data_offset > rdev->new_data_offset) 7205 return -EINVAL; 7206 } 7207 7208 mddev->delta_disks = raid_disks - mddev->raid_disks; 7209 if (mddev->delta_disks < 0) 7210 mddev->reshape_backwards = 1; 7211 else if (mddev->delta_disks > 0) 7212 mddev->reshape_backwards = 0; 7213 7214 rv = mddev->pers->check_reshape(mddev); 7215 if (rv < 0) { 7216 mddev->delta_disks = 0; 7217 mddev->reshape_backwards = 0; 7218 } 7219 return rv; 7220 } 7221 7222 /* 7223 * update_array_info is used to change the configuration of an 7224 * on-line array. 7225 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 7226 * fields in the info are checked against the array. 7227 * Any differences that cannot be handled will cause an error. 7228 * Normally, only one change can be managed at a time. 
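 * e.g. a request that changes both raid_disks and layout in a
 * single call fails with -EINVAL (the cnt > 1 check below).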
7229 */ 7230 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 7231 { 7232 int rv = 0; 7233 int cnt = 0; 7234 int state = 0; 7235 7236 /* calculate expected state,ignoring low bits */ 7237 if (mddev->bitmap && mddev->bitmap_info.offset) 7238 state |= (1 << MD_SB_BITMAP_PRESENT); 7239 7240 if (mddev->major_version != info->major_version || 7241 mddev->minor_version != info->minor_version || 7242 /* mddev->patch_version != info->patch_version || */ 7243 mddev->ctime != info->ctime || 7244 mddev->level != info->level || 7245 /* mddev->layout != info->layout || */ 7246 mddev->persistent != !info->not_persistent || 7247 mddev->chunk_sectors != info->chunk_size >> 9 || 7248 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 7249 ((state^info->state) & 0xfffffe00) 7250 ) 7251 return -EINVAL; 7252 /* Check there is only one change */ 7253 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7254 cnt++; 7255 if (mddev->raid_disks != info->raid_disks) 7256 cnt++; 7257 if (mddev->layout != info->layout) 7258 cnt++; 7259 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 7260 cnt++; 7261 if (cnt == 0) 7262 return 0; 7263 if (cnt > 1) 7264 return -EINVAL; 7265 7266 if (mddev->layout != info->layout) { 7267 /* Change layout 7268 * we don't need to do anything at the md level, the 7269 * personality will take care of it all. 7270 */ 7271 if (mddev->pers->check_reshape == NULL) 7272 return -EINVAL; 7273 else { 7274 mddev->new_layout = info->layout; 7275 rv = mddev->pers->check_reshape(mddev); 7276 if (rv) 7277 mddev->new_layout = mddev->layout; 7278 return rv; 7279 } 7280 } 7281 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7282 rv = update_size(mddev, (sector_t)info->size * 2); 7283 7284 if (mddev->raid_disks != info->raid_disks) 7285 rv = update_raid_disks(mddev, info->raid_disks); 7286 7287 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 7288 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 7289 rv = -EINVAL; 7290 goto err; 7291 } 7292 if (mddev->recovery || mddev->sync_thread) { 7293 rv = -EBUSY; 7294 goto err; 7295 } 7296 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 7297 struct bitmap *bitmap; 7298 /* add the bitmap */ 7299 if (mddev->bitmap) { 7300 rv = -EEXIST; 7301 goto err; 7302 } 7303 if (mddev->bitmap_info.default_offset == 0) { 7304 rv = -EINVAL; 7305 goto err; 7306 } 7307 mddev->bitmap_info.offset = 7308 mddev->bitmap_info.default_offset; 7309 mddev->bitmap_info.space = 7310 mddev->bitmap_info.default_space; 7311 bitmap = md_bitmap_create(mddev, -1); 7312 mddev_suspend(mddev); 7313 if (!IS_ERR(bitmap)) { 7314 mddev->bitmap = bitmap; 7315 rv = md_bitmap_load(mddev); 7316 } else 7317 rv = PTR_ERR(bitmap); 7318 if (rv) 7319 md_bitmap_destroy(mddev); 7320 mddev_resume(mddev); 7321 } else { 7322 /* remove the bitmap */ 7323 if (!mddev->bitmap) { 7324 rv = -ENOENT; 7325 goto err; 7326 } 7327 if (mddev->bitmap->storage.file) { 7328 rv = -EINVAL; 7329 goto err; 7330 } 7331 if (mddev->bitmap_info.nodes) { 7332 /* hold PW on all the bitmap lock */ 7333 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { 7334 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 7335 rv = -EPERM; 7336 md_cluster_ops->unlock_all_bitmaps(mddev); 7337 goto err; 7338 } 7339 7340 mddev->bitmap_info.nodes = 0; 7341 md_cluster_ops->leave(mddev); 7342 } 7343 mddev_suspend(mddev); 7344 md_bitmap_destroy(mddev); 7345 mddev_resume(mddev); 7346 mddev->bitmap_info.offset = 0; 7347 } 7348 } 7349 
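/* persist whichever single change was applied above */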
md_update_sb(mddev, 1); 7350 return rv; 7351 err: 7352 return rv; 7353 } 7354 7355 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 7356 { 7357 struct md_rdev *rdev; 7358 int err = 0; 7359 7360 if (mddev->pers == NULL) 7361 return -ENODEV; 7362 7363 rcu_read_lock(); 7364 rdev = md_find_rdev_rcu(mddev, dev); 7365 if (!rdev) 7366 err = -ENODEV; 7367 else { 7368 md_error(mddev, rdev); 7369 if (!test_bit(Faulty, &rdev->flags)) 7370 err = -EBUSY; 7371 } 7372 rcu_read_unlock(); 7373 return err; 7374 } 7375 7376 /* 7377 * We have a problem here : there is no easy way to give a CHS 7378 * virtual geometry. We currently pretend that we have a 2 heads 7379 * 4 sectors (with a BIG number of cylinders...). This drives 7380 * dosfs just mad... ;-) 7381 */ 7382 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 7383 { 7384 struct mddev *mddev = bdev->bd_disk->private_data; 7385 7386 geo->heads = 2; 7387 geo->sectors = 4; 7388 geo->cylinders = mddev->array_sectors / 8; 7389 return 0; 7390 } 7391 7392 static inline bool md_ioctl_valid(unsigned int cmd) 7393 { 7394 switch (cmd) { 7395 case ADD_NEW_DISK: 7396 case BLKROSET: 7397 case GET_ARRAY_INFO: 7398 case GET_BITMAP_FILE: 7399 case GET_DISK_INFO: 7400 case HOT_ADD_DISK: 7401 case HOT_REMOVE_DISK: 7402 case RAID_AUTORUN: 7403 case RAID_VERSION: 7404 case RESTART_ARRAY_RW: 7405 case RUN_ARRAY: 7406 case SET_ARRAY_INFO: 7407 case SET_BITMAP_FILE: 7408 case SET_DISK_FAULTY: 7409 case STOP_ARRAY: 7410 case STOP_ARRAY_RO: 7411 case CLUSTERED_DISK_NACK: 7412 return true; 7413 default: 7414 return false; 7415 } 7416 } 7417 7418 static int md_ioctl(struct block_device *bdev, fmode_t mode, 7419 unsigned int cmd, unsigned long arg) 7420 { 7421 int err = 0; 7422 void __user *argp = (void __user *)arg; 7423 struct mddev *mddev = NULL; 7424 int ro; 7425 bool did_set_md_closing = false; 7426 7427 if (!md_ioctl_valid(cmd)) 7428 return -ENOTTY; 7429 7430 switch (cmd) { 7431 case RAID_VERSION: 7432 case GET_ARRAY_INFO: 7433 case GET_DISK_INFO: 7434 break; 7435 default: 7436 if (!capable(CAP_SYS_ADMIN)) 7437 return -EACCES; 7438 } 7439 7440 /* 7441 * Commands dealing with the RAID driver but not any 7442 * particular array: 7443 */ 7444 switch (cmd) { 7445 case RAID_VERSION: 7446 err = get_version(argp); 7447 goto out; 7448 7449 #ifndef MODULE 7450 case RAID_AUTORUN: 7451 err = 0; 7452 autostart_arrays(arg); 7453 goto out; 7454 #endif 7455 default:; 7456 } 7457 7458 /* 7459 * Commands creating/starting a new array: 7460 */ 7461 7462 mddev = bdev->bd_disk->private_data; 7463 7464 if (!mddev) { 7465 BUG(); 7466 goto out; 7467 } 7468 7469 /* Some actions do not requires the mutex */ 7470 switch (cmd) { 7471 case GET_ARRAY_INFO: 7472 if (!mddev->raid_disks && !mddev->external) 7473 err = -ENODEV; 7474 else 7475 err = get_array_info(mddev, argp); 7476 goto out; 7477 7478 case GET_DISK_INFO: 7479 if (!mddev->raid_disks && !mddev->external) 7480 err = -ENODEV; 7481 else 7482 err = get_disk_info(mddev, argp); 7483 goto out; 7484 7485 case SET_DISK_FAULTY: 7486 err = set_disk_faulty(mddev, new_decode_dev(arg)); 7487 goto out; 7488 7489 case GET_BITMAP_FILE: 7490 err = get_bitmap_file(mddev, argp); 7491 goto out; 7492 7493 } 7494 7495 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK) 7496 flush_rdev_wq(mddev); 7497 7498 if (cmd == HOT_REMOVE_DISK) 7499 /* need to ensure recovery thread has run */ 7500 wait_event_interruptible_timeout(mddev->sb_wait, 7501 !test_bit(MD_RECOVERY_NEEDED, 7502 &mddev->recovery), 7503 msecs_to_jiffies(5000)); 7504 if (cmd == 
STOP_ARRAY || cmd == STOP_ARRAY_RO) { 7505 /* Need to flush page cache, and ensure no-one else opens 7506 * and writes 7507 */ 7508 mutex_lock(&mddev->open_mutex); 7509 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 7510 mutex_unlock(&mddev->open_mutex); 7511 err = -EBUSY; 7512 goto out; 7513 } 7514 WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); 7515 set_bit(MD_CLOSING, &mddev->flags); 7516 did_set_md_closing = true; 7517 mutex_unlock(&mddev->open_mutex); 7518 sync_blockdev(bdev); 7519 } 7520 err = mddev_lock(mddev); 7521 if (err) { 7522 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 7523 err, cmd); 7524 goto out; 7525 } 7526 7527 if (cmd == SET_ARRAY_INFO) { 7528 mdu_array_info_t info; 7529 if (!arg) 7530 memset(&info, 0, sizeof(info)); 7531 else if (copy_from_user(&info, argp, sizeof(info))) { 7532 err = -EFAULT; 7533 goto unlock; 7534 } 7535 if (mddev->pers) { 7536 err = update_array_info(mddev, &info); 7537 if (err) { 7538 pr_warn("md: couldn't update array info. %d\n", err); 7539 goto unlock; 7540 } 7541 goto unlock; 7542 } 7543 if (!list_empty(&mddev->disks)) { 7544 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 7545 err = -EBUSY; 7546 goto unlock; 7547 } 7548 if (mddev->raid_disks) { 7549 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 7550 err = -EBUSY; 7551 goto unlock; 7552 } 7553 err = set_array_info(mddev, &info); 7554 if (err) { 7555 pr_warn("md: couldn't set array info. %d\n", err); 7556 goto unlock; 7557 } 7558 goto unlock; 7559 } 7560 7561 /* 7562 * Commands querying/configuring an existing array: 7563 */ 7564 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 7565 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 7566 if ((!mddev->raid_disks && !mddev->external) 7567 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 7568 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 7569 && cmd != GET_BITMAP_FILE) { 7570 err = -ENODEV; 7571 goto unlock; 7572 } 7573 7574 /* 7575 * Commands even a read-only array can execute: 7576 */ 7577 switch (cmd) { 7578 case RESTART_ARRAY_RW: 7579 err = restart_array(mddev); 7580 goto unlock; 7581 7582 case STOP_ARRAY: 7583 err = do_md_stop(mddev, 0, bdev); 7584 goto unlock; 7585 7586 case STOP_ARRAY_RO: 7587 err = md_set_readonly(mddev, bdev); 7588 goto unlock; 7589 7590 case HOT_REMOVE_DISK: 7591 err = hot_remove_disk(mddev, new_decode_dev(arg)); 7592 goto unlock; 7593 7594 case ADD_NEW_DISK: 7595 /* We can support ADD_NEW_DISK on read-only arrays 7596 * only if we are re-adding a preexisting device. 7597 * So require mddev->pers and MD_DISK_SYNC. 7598 */ 7599 if (mddev->pers) { 7600 mdu_disk_info_t info; 7601 if (copy_from_user(&info, argp, sizeof(info))) 7602 err = -EFAULT; 7603 else if (!(info.state & (1<<MD_DISK_SYNC))) 7604 /* Need to clear read-only for this */ 7605 break; 7606 else 7607 err = add_new_disk(mddev, &info); 7608 goto unlock; 7609 } 7610 break; 7611 7612 case BLKROSET: 7613 if (get_user(ro, (int __user *)(arg))) { 7614 err = -EFAULT; 7615 goto unlock; 7616 } 7617 err = -EINVAL; 7618 7619 /* if the bdev is going readonly the value of mddev->ro 7620 * does not matter, no writes are coming 7621 */ 7622 if (ro) 7623 goto unlock; 7624 7625 /* are we already prepared for writes?
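 * (mddev->ro == 1 is hard read-only; ro == 2 is read-auto, which
 * already allows writes to begin)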
*/ 7626 if (mddev->ro != 1) 7627 goto unlock; 7628 7629 /* transitioning to readauto need only happen for 7630 * arrays that call md_write_start 7631 */ 7632 if (mddev->pers) { 7633 err = restart_array(mddev); 7634 if (err == 0) { 7635 mddev->ro = 2; 7636 set_disk_ro(mddev->gendisk, 0); 7637 } 7638 } 7639 goto unlock; 7640 } 7641 7642 /* 7643 * The remaining ioctls are changing the state of the 7644 * superblock, so we do not allow them on read-only arrays. 7645 */ 7646 if (mddev->ro && mddev->pers) { 7647 if (mddev->ro == 2) { 7648 mddev->ro = 0; 7649 sysfs_notify_dirent_safe(mddev->sysfs_state); 7650 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7651 /* mddev_unlock will wake thread */ 7652 /* If a device failed while we were read-only, we 7653 * need to make sure the metadata is updated now. 7654 */ 7655 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 7656 mddev_unlock(mddev); 7657 wait_event(mddev->sb_wait, 7658 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 7659 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 7660 mddev_lock_nointr(mddev); 7661 } 7662 } else { 7663 err = -EROFS; 7664 goto unlock; 7665 } 7666 } 7667 7668 switch (cmd) { 7669 case ADD_NEW_DISK: 7670 { 7671 mdu_disk_info_t info; 7672 if (copy_from_user(&info, argp, sizeof(info))) 7673 err = -EFAULT; 7674 else 7675 err = add_new_disk(mddev, &info); 7676 goto unlock; 7677 } 7678 7679 case CLUSTERED_DISK_NACK: 7680 if (mddev_is_clustered(mddev)) 7681 md_cluster_ops->new_disk_ack(mddev, false); 7682 else 7683 err = -EINVAL; 7684 goto unlock; 7685 7686 case HOT_ADD_DISK: 7687 err = hot_add_disk(mddev, new_decode_dev(arg)); 7688 goto unlock; 7689 7690 case RUN_ARRAY: 7691 err = do_md_run(mddev); 7692 goto unlock; 7693 7694 case SET_BITMAP_FILE: 7695 err = set_bitmap_file(mddev, (int)arg); 7696 goto unlock; 7697 7698 default: 7699 err = -EINVAL; 7700 goto unlock; 7701 } 7702 7703 unlock: 7704 if (mddev->hold_active == UNTIL_IOCTL && 7705 err != -EINVAL) 7706 mddev->hold_active = 0; 7707 mddev_unlock(mddev); 7708 out: 7709 if(did_set_md_closing) 7710 clear_bit(MD_CLOSING, &mddev->flags); 7711 return err; 7712 } 7713 #ifdef CONFIG_COMPAT 7714 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 7715 unsigned int cmd, unsigned long arg) 7716 { 7717 switch (cmd) { 7718 case HOT_REMOVE_DISK: 7719 case HOT_ADD_DISK: 7720 case SET_DISK_FAULTY: 7721 case SET_BITMAP_FILE: 7722 /* These take in integer arg, do not convert */ 7723 break; 7724 default: 7725 arg = (unsigned long)compat_ptr(arg); 7726 break; 7727 } 7728 7729 return md_ioctl(bdev, mode, cmd, arg); 7730 } 7731 #endif /* CONFIG_COMPAT */ 7732 7733 static int md_open(struct block_device *bdev, fmode_t mode) 7734 { 7735 /* 7736 * Succeed if we can lock the mddev, which confirms that 7737 * it isn't being stopped right now. 7738 */ 7739 struct mddev *mddev = mddev_find(bdev->bd_dev); 7740 int err; 7741 7742 if (!mddev) 7743 return -ENODEV; 7744 7745 if (mddev->gendisk != bdev->bd_disk) { 7746 /* we are racing with mddev_put which is discarding this 7747 * bd_disk. 
7748 */ 7749 mddev_put(mddev); 7750 /* Wait until bdev->bd_disk is definitely gone */ 7751 if (work_pending(&mddev->del_work)) 7752 flush_workqueue(md_misc_wq); 7753 /* Then retry the open from the top */ 7754 return -ERESTARTSYS; 7755 } 7756 BUG_ON(mddev != bdev->bd_disk->private_data); 7757 7758 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 7759 goto out; 7760 7761 if (test_bit(MD_CLOSING, &mddev->flags)) { 7762 mutex_unlock(&mddev->open_mutex); 7763 err = -ENODEV; 7764 goto out; 7765 } 7766 7767 err = 0; 7768 atomic_inc(&mddev->openers); 7769 mutex_unlock(&mddev->open_mutex); 7770 7771 check_disk_change(bdev); 7772 out: 7773 if (err) 7774 mddev_put(mddev); 7775 return err; 7776 } 7777 7778 static void md_release(struct gendisk *disk, fmode_t mode) 7779 { 7780 struct mddev *mddev = disk->private_data; 7781 7782 BUG_ON(!mddev); 7783 atomic_dec(&mddev->openers); 7784 mddev_put(mddev); 7785 } 7786 7787 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) 7788 { 7789 struct mddev *mddev = disk->private_data; 7790 unsigned int ret = 0; 7791 7792 if (mddev->changed) 7793 ret = DISK_EVENT_MEDIA_CHANGE; 7794 mddev->changed = 0; 7795 return ret; 7796 } 7797 7798 static const struct block_device_operations md_fops = 7799 { 7800 .owner = THIS_MODULE, 7801 .submit_bio = md_submit_bio, 7802 .open = md_open, 7803 .release = md_release, 7804 .ioctl = md_ioctl, 7805 #ifdef CONFIG_COMPAT 7806 .compat_ioctl = md_compat_ioctl, 7807 #endif 7808 .getgeo = md_getgeo, 7809 .check_events = md_check_events, 7810 }; 7811 7812 static int md_thread(void *arg) 7813 { 7814 struct md_thread *thread = arg; 7815 7816 /* 7817 * md_thread is a 'system-thread'; its priority should be very 7818 * high. We avoid resource deadlocks individually in each 7819 * raid personality. (RAID5 does preallocation) We also use RR and 7820 * the very same RT priority as kswapd, thus we will never get 7821 * into a priority inversion deadlock. 7822 * 7823 * We definitely have to have equal or higher priority than 7824 * bdflush, otherwise bdflush will deadlock if there are too 7825 * many dirty RAID5 blocks. 7826 */ 7827 7828 allow_signal(SIGKILL); 7829 while (!kthread_should_stop()) { 7830 7831 /* We need to wait INTERRUPTIBLE so that 7832 * we don't add to the load-average.
7833 * That means we need to be sure no signals are 7834 * pending 7835 */ 7836 if (signal_pending(current)) 7837 flush_signals(current); 7838 7839 wait_event_interruptible_timeout 7840 (thread->wqueue, 7841 test_bit(THREAD_WAKEUP, &thread->flags) 7842 || kthread_should_stop() || kthread_should_park(), 7843 thread->timeout); 7844 7845 clear_bit(THREAD_WAKEUP, &thread->flags); 7846 if (kthread_should_park()) 7847 kthread_parkme(); 7848 if (!kthread_should_stop()) 7849 thread->run(thread); 7850 } 7851 7852 return 0; 7853 } 7854 7855 void md_wakeup_thread(struct md_thread *thread) 7856 { 7857 if (thread) { 7858 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7859 set_bit(THREAD_WAKEUP, &thread->flags); 7860 wake_up(&thread->wqueue); 7861 } 7862 } 7863 EXPORT_SYMBOL(md_wakeup_thread); 7864 7865 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7866 struct mddev *mddev, const char *name) 7867 { 7868 struct md_thread *thread; 7869 7870 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7871 if (!thread) 7872 return NULL; 7873 7874 init_waitqueue_head(&thread->wqueue); 7875 7876 thread->run = run; 7877 thread->mddev = mddev; 7878 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7879 thread->tsk = kthread_run(md_thread, thread, 7880 "%s_%s", 7881 mdname(thread->mddev), 7882 name); 7883 if (IS_ERR(thread->tsk)) { 7884 kfree(thread); 7885 return NULL; 7886 } 7887 return thread; 7888 } 7889 EXPORT_SYMBOL(md_register_thread); 7890 7891 void md_unregister_thread(struct md_thread **threadp) 7892 { 7893 struct md_thread *thread = *threadp; 7894 if (!thread) 7895 return; 7896 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7897 /* Locking ensures that mddev_unlock does not wake_up a 7898 * non-existent thread 7899 */ 7900 spin_lock(&pers_lock); 7901 *threadp = NULL; 7902 spin_unlock(&pers_lock); 7903 7904 kthread_stop(thread->tsk); 7905 kfree(thread); 7906 } 7907 EXPORT_SYMBOL(md_unregister_thread); 7908 7909 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7910 { 7911 if (!rdev || test_bit(Faulty, &rdev->flags)) 7912 return; 7913 7914 if (!mddev->pers || !mddev->pers->error_handler) 7915 return; 7916 mddev->pers->error_handler(mddev,rdev); 7917 if (mddev->degraded) 7918 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7919 sysfs_notify_dirent_safe(rdev->sysfs_state); 7920 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7921 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7922 md_wakeup_thread(mddev->thread); 7923 if (mddev->event_work.func) 7924 queue_work(md_misc_wq, &mddev->event_work); 7925 md_new_event(mddev); 7926 } 7927 EXPORT_SYMBOL(md_error); 7928 7929 /* seq_file implementation /proc/mdstat */ 7930 7931 static void status_unused(struct seq_file *seq) 7932 { 7933 int i = 0; 7934 struct md_rdev *rdev; 7935 7936 seq_printf(seq, "unused devices: "); 7937 7938 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7939 char b[BDEVNAME_SIZE]; 7940 i++; 7941 seq_printf(seq, "%s ", 7942 bdevname(rdev->bdev,b)); 7943 } 7944 if (!i) 7945 seq_printf(seq, "<none>"); 7946 7947 seq_printf(seq, "\n"); 7948 } 7949 7950 static int status_resync(struct seq_file *seq, struct mddev *mddev) 7951 { 7952 sector_t max_sectors, resync, res; 7953 unsigned long dt, db = 0; 7954 sector_t rt, curr_mark_cnt, resync_mark_cnt; 7955 int scale, recovery_active; 7956 unsigned int per_milli; 7957 7958 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 7959 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7960 max_sectors = mddev->resync_max_sectors; 7961 else 7962 
max_sectors = mddev->dev_sectors; 7963 7964 resync = mddev->curr_resync; 7965 if (resync <= 3) { 7966 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7967 /* Still cleaning up */ 7968 resync = max_sectors; 7969 } else if (resync > max_sectors) 7970 resync = max_sectors; 7971 else 7972 resync -= atomic_read(&mddev->recovery_active); 7973 7974 if (resync == 0) { 7975 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 7976 struct md_rdev *rdev; 7977 7978 rdev_for_each(rdev, mddev) 7979 if (rdev->raid_disk >= 0 && 7980 !test_bit(Faulty, &rdev->flags) && 7981 rdev->recovery_offset != MaxSector && 7982 rdev->recovery_offset) { 7983 seq_printf(seq, "\trecover=REMOTE"); 7984 return 1; 7985 } 7986 if (mddev->reshape_position != MaxSector) 7987 seq_printf(seq, "\treshape=REMOTE"); 7988 else 7989 seq_printf(seq, "\tresync=REMOTE"); 7990 return 1; 7991 } 7992 if (mddev->recovery_cp < MaxSector) { 7993 seq_printf(seq, "\tresync=PENDING"); 7994 return 1; 7995 } 7996 return 0; 7997 } 7998 if (resync < 3) { 7999 seq_printf(seq, "\tresync=DELAYED"); 8000 return 1; 8001 } 8002 8003 WARN_ON(max_sectors == 0); 8004 /* Pick 'scale' such that (resync>>scale)*1000 will fit 8005 * in a sector_t, and (max_sectors>>scale) will fit in a 8006 * u32, as those are the requirements for sector_div. 8007 * Thus 'scale' must be at least 10 8008 */ 8009 scale = 10; 8010 if (sizeof(sector_t) > sizeof(unsigned long)) { 8011 while ( max_sectors/2 > (1ULL<<(scale+32))) 8012 scale++; 8013 } 8014 res = (resync>>scale)*1000; 8015 sector_div(res, (u32)((max_sectors>>scale)+1)); 8016 8017 per_milli = res; 8018 { 8019 int i, x = per_milli/50, y = 20-x; 8020 seq_printf(seq, "["); 8021 for (i = 0; i < x; i++) 8022 seq_printf(seq, "="); 8023 seq_printf(seq, ">"); 8024 for (i = 0; i < y; i++) 8025 seq_printf(seq, "."); 8026 seq_printf(seq, "] "); 8027 } 8028 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 8029 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 8030 "reshape" : 8031 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 8032 "check" : 8033 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 8034 "resync" : "recovery"))), 8035 per_milli/10, per_milli % 10, 8036 (unsigned long long) resync/2, 8037 (unsigned long long) max_sectors/2); 8038 8039 /* 8040 * dt: time from mark until now 8041 * db: blocks written from mark until now 8042 * rt: remaining time 8043 * 8044 * rt is a sector_t, which is always 64bit now. We are keeping 8045 * the original algorithm, but it is not really necessary. 8046 * 8047 * Original algorithm: 8048 * So we divide before multiply in case it is 32bit and close 8049 * to the limit. 8050 * We scale the divisor (db) by 32 to avoid losing precision 8051 * near the end of resync when the number of remaining sectors 8052 * is close to 'db'. 8053 * We then divide rt by 32 after multiplying by db to compensate. 8054 * The '+1' avoids division by zero if db is very small. 
8055 */ 8056 dt = ((jiffies - mddev->resync_mark) / HZ); 8057 if (!dt) dt++; 8058 8059 curr_mark_cnt = mddev->curr_mark_cnt; 8060 recovery_active = atomic_read(&mddev->recovery_active); 8061 resync_mark_cnt = mddev->resync_mark_cnt; 8062 8063 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) 8064 db = curr_mark_cnt - (recovery_active + resync_mark_cnt); 8065 8066 rt = max_sectors - resync; /* number of remaining sectors */ 8067 rt = div64_u64(rt, db/32+1); 8068 rt *= dt; 8069 rt >>= 5; 8070 8071 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 8072 ((unsigned long)rt % 60)/6); 8073 8074 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 8075 return 1; 8076 } 8077 8078 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 8079 { 8080 struct list_head *tmp; 8081 loff_t l = *pos; 8082 struct mddev *mddev; 8083 8084 if (l >= 0x10000) 8085 return NULL; 8086 if (!l--) 8087 /* header */ 8088 return (void*)1; 8089 8090 spin_lock(&all_mddevs_lock); 8091 list_for_each(tmp,&all_mddevs) 8092 if (!l--) { 8093 mddev = list_entry(tmp, struct mddev, all_mddevs); 8094 mddev_get(mddev); 8095 spin_unlock(&all_mddevs_lock); 8096 return mddev; 8097 } 8098 spin_unlock(&all_mddevs_lock); 8099 if (!l--) 8100 return (void*)2;/* tail */ 8101 return NULL; 8102 } 8103 8104 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 8105 { 8106 struct list_head *tmp; 8107 struct mddev *next_mddev, *mddev = v; 8108 8109 ++*pos; 8110 if (v == (void*)2) 8111 return NULL; 8112 8113 spin_lock(&all_mddevs_lock); 8114 if (v == (void*)1) 8115 tmp = all_mddevs.next; 8116 else 8117 tmp = mddev->all_mddevs.next; 8118 if (tmp != &all_mddevs) 8119 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 8120 else { 8121 next_mddev = (void*)2; 8122 *pos = 0x10000; 8123 } 8124 spin_unlock(&all_mddevs_lock); 8125 8126 if (v != (void*)1) 8127 mddev_put(mddev); 8128 return next_mddev; 8129 8130 } 8131 8132 static void md_seq_stop(struct seq_file *seq, void *v) 8133 { 8134 struct mddev *mddev = v; 8135 8136 if (mddev && v != (void*)1 && v != (void*)2) 8137 mddev_put(mddev); 8138 } 8139 8140 static int md_seq_show(struct seq_file *seq, void *v) 8141 { 8142 struct mddev *mddev = v; 8143 sector_t sectors; 8144 struct md_rdev *rdev; 8145 8146 if (v == (void*)1) { 8147 struct md_personality *pers; 8148 seq_printf(seq, "Personalities : "); 8149 spin_lock(&pers_lock); 8150 list_for_each_entry(pers, &pers_list, list) 8151 seq_printf(seq, "[%s] ", pers->name); 8152 8153 spin_unlock(&pers_lock); 8154 seq_printf(seq, "\n"); 8155 seq->poll_event = atomic_read(&md_event_count); 8156 return 0; 8157 } 8158 if (v == (void*)2) { 8159 status_unused(seq); 8160 return 0; 8161 } 8162 8163 spin_lock(&mddev->lock); 8164 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 8165 seq_printf(seq, "%s : %sactive", mdname(mddev), 8166 mddev->pers ? 
"" : "in"); 8167 if (mddev->pers) { 8168 if (mddev->ro==1) 8169 seq_printf(seq, " (read-only)"); 8170 if (mddev->ro==2) 8171 seq_printf(seq, " (auto-read-only)"); 8172 seq_printf(seq, " %s", mddev->pers->name); 8173 } 8174 8175 sectors = 0; 8176 rcu_read_lock(); 8177 rdev_for_each_rcu(rdev, mddev) { 8178 char b[BDEVNAME_SIZE]; 8179 seq_printf(seq, " %s[%d]", 8180 bdevname(rdev->bdev,b), rdev->desc_nr); 8181 if (test_bit(WriteMostly, &rdev->flags)) 8182 seq_printf(seq, "(W)"); 8183 if (test_bit(Journal, &rdev->flags)) 8184 seq_printf(seq, "(J)"); 8185 if (test_bit(Faulty, &rdev->flags)) { 8186 seq_printf(seq, "(F)"); 8187 continue; 8188 } 8189 if (rdev->raid_disk < 0) 8190 seq_printf(seq, "(S)"); /* spare */ 8191 if (test_bit(Replacement, &rdev->flags)) 8192 seq_printf(seq, "(R)"); 8193 sectors += rdev->sectors; 8194 } 8195 rcu_read_unlock(); 8196 8197 if (!list_empty(&mddev->disks)) { 8198 if (mddev->pers) 8199 seq_printf(seq, "\n %llu blocks", 8200 (unsigned long long) 8201 mddev->array_sectors / 2); 8202 else 8203 seq_printf(seq, "\n %llu blocks", 8204 (unsigned long long)sectors / 2); 8205 } 8206 if (mddev->persistent) { 8207 if (mddev->major_version != 0 || 8208 mddev->minor_version != 90) { 8209 seq_printf(seq," super %d.%d", 8210 mddev->major_version, 8211 mddev->minor_version); 8212 } 8213 } else if (mddev->external) 8214 seq_printf(seq, " super external:%s", 8215 mddev->metadata_type); 8216 else 8217 seq_printf(seq, " super non-persistent"); 8218 8219 if (mddev->pers) { 8220 mddev->pers->status(seq, mddev); 8221 seq_printf(seq, "\n "); 8222 if (mddev->pers->sync_request) { 8223 if (status_resync(seq, mddev)) 8224 seq_printf(seq, "\n "); 8225 } 8226 } else 8227 seq_printf(seq, "\n "); 8228 8229 md_bitmap_status(seq, mddev->bitmap); 8230 8231 seq_printf(seq, "\n"); 8232 } 8233 spin_unlock(&mddev->lock); 8234 8235 return 0; 8236 } 8237 8238 static const struct seq_operations md_seq_ops = { 8239 .start = md_seq_start, 8240 .next = md_seq_next, 8241 .stop = md_seq_stop, 8242 .show = md_seq_show, 8243 }; 8244 8245 static int md_seq_open(struct inode *inode, struct file *file) 8246 { 8247 struct seq_file *seq; 8248 int error; 8249 8250 error = seq_open(file, &md_seq_ops); 8251 if (error) 8252 return error; 8253 8254 seq = file->private_data; 8255 seq->poll_event = atomic_read(&md_event_count); 8256 return error; 8257 } 8258 8259 static int md_unloading; 8260 static __poll_t mdstat_poll(struct file *filp, poll_table *wait) 8261 { 8262 struct seq_file *seq = filp->private_data; 8263 __poll_t mask; 8264 8265 if (md_unloading) 8266 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 8267 poll_wait(filp, &md_event_waiters, wait); 8268 8269 /* always allow read */ 8270 mask = EPOLLIN | EPOLLRDNORM; 8271 8272 if (seq->poll_event != atomic_read(&md_event_count)) 8273 mask |= EPOLLERR | EPOLLPRI; 8274 return mask; 8275 } 8276 8277 static const struct proc_ops mdstat_proc_ops = { 8278 .proc_open = md_seq_open, 8279 .proc_read = seq_read, 8280 .proc_lseek = seq_lseek, 8281 .proc_release = seq_release, 8282 .proc_poll = mdstat_poll, 8283 }; 8284 8285 int register_md_personality(struct md_personality *p) 8286 { 8287 pr_debug("md: %s personality registered for level %d\n", 8288 p->name, p->level); 8289 spin_lock(&pers_lock); 8290 list_add_tail(&p->list, &pers_list); 8291 spin_unlock(&pers_lock); 8292 return 0; 8293 } 8294 EXPORT_SYMBOL(register_md_personality); 8295 8296 int unregister_md_personality(struct md_personality *p) 8297 { 8298 pr_debug("md: %s personality unregistered\n", p->name); 8299 
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_personality);

int register_md_cluster_operations(struct md_cluster_operations *ops,
				   struct module *module)
{
	int ret = 0;
	spin_lock(&pers_lock);
	if (md_cluster_ops != NULL)
		ret = -EALREADY;
	else {
		md_cluster_ops = ops;
		md_cluster_mod = module;
	}
	spin_unlock(&pers_lock);
	return ret;
}
EXPORT_SYMBOL(register_md_cluster_operations);

int unregister_md_cluster_operations(void)
{
	spin_lock(&pers_lock);
	md_cluster_ops = NULL;
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_cluster_operations);

int md_setup_cluster(struct mddev *mddev, int nodes)
{
	if (!md_cluster_ops)
		request_module("md-cluster");
	spin_lock(&pers_lock);
	/* ensure module won't be unloaded */
	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
		pr_warn("can't find md-cluster module or get its reference.\n");
		spin_unlock(&pers_lock);
		return -ENOENT;
	}
	spin_unlock(&pers_lock);

	return md_cluster_ops->join(mddev, nodes);
}

void md_cluster_stop(struct mddev *mddev)
{
	if (!md_cluster_ops)
		return;
	md_cluster_ops->leave(mddev);
	module_put(md_cluster_mod);
}

static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
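/*
 * Illustrative note (not from the driver itself): the sync_io counter that
 * is_mddev_idle() subtracts above is maintained by the personalities when
 * they submit resync/recovery IO, typically via the md_sync_acct() helper
 * from md.h.  A recovery path might look roughly like this (hypothetical
 * sketch; see the raid1 sync path for a real user):
 *
 *	// account nr_sectors of sync IO against this device, so that
 *	// is_mddev_idle() does not mistake our own resync traffic for
 *	// application IO and throttle the resync:
 *	md_sync_acct(rdev->bdev, nr_sectors);
 *	submit_bio_noacct(sync_bio);
 */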
void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
EXPORT_SYMBOL(md_done_sync);

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 * A return value of 'false' means that the write wasn't recorded
 * and cannot proceed as the array is being suspended.
 */
bool md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;

	if (bio_data_dir(bi) != WRITE)
		return true;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	rcu_read_lock();
	percpu_ref_get(&mddev->writes_pending);
	smp_mb(); /* Match smp_mb in set_in_sync() */
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
	if (mddev->in_sync || mddev->sync_checkers) {
		spin_lock(&mddev->lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock(&mddev->lock);
	}
	rcu_read_unlock();
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	if (!mddev->has_superblocks)
		return true;
	wait_event(mddev->sb_wait,
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
		   mddev->suspended);
	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		percpu_ref_put(&mddev->writes_pending);
		return false;
	}
	return true;
}
EXPORT_SYMBOL(md_write_start);

/* md_write_inc can only be called when md_write_start() has
 * already been called at least once for the current request.
 * It increments the counter and is useful when a single request
 * is split into several parts.  Each part causes an increment and
 * so needs a matching md_write_end().
 * Unlike md_write_start(), it is safe to call md_write_inc() inside
 * a spinlocked region.
 */
void md_write_inc(struct mddev *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;
	WARN_ON_ONCE(mddev->in_sync || mddev->ro);
	percpu_ref_get(&mddev->writes_pending);
}
EXPORT_SYMBOL(md_write_inc);

void md_write_end(struct mddev *mddev)
{
	percpu_ref_put(&mddev->writes_pending);

	if (mddev->safemode == 2)
		md_wakeup_thread(mddev->thread);
	else if (mddev->safemode_delay)
		/* The roundup() ensures this only performs locking once
		 * every ->safemode_delay jiffies
		 */
		mod_timer(&mddev->safemode_timer,
			  roundup(jiffies, mddev->safemode_delay) +
			  mddev->safemode_delay);
}

EXPORT_SYMBOL(md_write_end);
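/*
 * Illustrative sketch (not from the driver itself): how a personality's
 * make_request path pairs the calls above.  demo_make_request() is a
 * hypothetical name; the raid1/raid5 paths follow this shape:
 *
 *	static bool demo_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (!md_write_start(mddev, bio))
 *			return false;	// array suspended, write not recorded
 *		// ... queue or submit the write(s); a request split into
 *		// several parts takes md_write_inc() for each extra part ...
 *		// later, once per start/inc, when the write completes:
 *		md_write_end(mddev);
 *		return true;
 *	}
 */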
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return;
	if (mddev->ro)
		return;
	if (!mddev->pers->sync_request)
		return;

	spin_lock(&mddev->lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock(&mddev->lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		/* wait for the dirty state to be recorded in the metadata */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	} else
		spin_unlock(&mddev->lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);
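/*
 * Illustrative sketch (not from the driver itself): the intended calling
 * pattern for md_allow_write().  Without it, a GFP_KERNEL allocation made
 * while holding the mddev lock could recurse into reclaim, which may issue
 * a write to this very array; that write would then block waiting for the
 * 'array active' metadata update that only the lock holder can perform.
 * Hypothetical shape (cf. the raid5 stripe-cache resize path):
 *
 *	// caller holds mddev_lock(mddev)
 *	md_allow_write(mddev);			// mark array active first
 *	buf = kzalloc(size, GFP_KERNEL);	// now safe to sleep here
 *	if (!buf)
 *		return -ENOMEM;
 */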
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct mddev *mddev2;
	unsigned int currspeed = 0, window;
	sector_t max_sectors, j, io_sectors, recovery_done;
	unsigned long mark[SYNC_MARKS];
	unsigned long update_time;
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct md_rdev *rdev;
	char *desc, *action = NULL;
	struct blk_plug plug;
	int ret;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
		return;
	if (mddev->ro) {/* never try to sync a read-only array */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
	}

	if (mddev_is_clustered(mddev)) {
		ret = md_cluster_ops->resync_start(mddev);
		if (ret)
			goto skip;

		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
		     && ((unsigned long long)mddev->curr_resync_completed
			 < (unsigned long long)mddev->resync_max_sectors))
			goto skip;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
			desc = "data-check";
			action = "check";
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			desc = "requested-resync";
			action = "repair";
		} else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	mddev->last_sync_action = action ?: desc;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		int mddev2_minor = -1;
		mddev->curr_resync = 2;

	try_again:
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					if (mddev2_minor != mddev2->md_minor) {
						mddev2_minor = mddev2->md_minor;
						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
							desc, mdname(mddev),
							mdname(mddev2));
					}
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
		max_sectors = mddev->resync_max_sectors;
		/*
		 * If the original node aborts reshaping then we continue the
		 * reshaping, so set j again to avoid restarting the reshape
		 * from the very beginning
		 */
		if (mddev_is_clustered(mddev) &&
		    mddev->reshape_position != MaxSector)
			j = mddev->reshape_position;
	} else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Journal, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();

		/* If there is a bitmap, we need to make sure all
		 * writes that started before we added a spare
		 * complete before we start doing a recovery.
		 * Otherwise the write might complete and (via
		 * bitmap_endwrite) set a bit in the bitmap after the
		 * recovery has checked that bit and skipped that
		 * region.
8705 */ 8706 if (mddev->bitmap) { 8707 mddev->pers->quiesce(mddev, 1); 8708 mddev->pers->quiesce(mddev, 0); 8709 } 8710 } 8711 8712 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); 8713 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); 8714 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", 8715 speed_max(mddev), desc); 8716 8717 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 8718 8719 io_sectors = 0; 8720 for (m = 0; m < SYNC_MARKS; m++) { 8721 mark[m] = jiffies; 8722 mark_cnt[m] = io_sectors; 8723 } 8724 last_mark = 0; 8725 mddev->resync_mark = mark[last_mark]; 8726 mddev->resync_mark_cnt = mark_cnt[last_mark]; 8727 8728 /* 8729 * Tune reconstruction: 8730 */ 8731 window = 32 * (PAGE_SIZE / 512); 8732 pr_debug("md: using %dk window, over a total of %lluk.\n", 8733 window/2, (unsigned long long)max_sectors/2); 8734 8735 atomic_set(&mddev->recovery_active, 0); 8736 last_check = 0; 8737 8738 if (j>2) { 8739 pr_debug("md: resuming %s of %s from checkpoint.\n", 8740 desc, mdname(mddev)); 8741 mddev->curr_resync = j; 8742 } else 8743 mddev->curr_resync = 3; /* no longer delayed */ 8744 mddev->curr_resync_completed = j; 8745 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8746 md_new_event(mddev); 8747 update_time = jiffies; 8748 8749 blk_start_plug(&plug); 8750 while (j < max_sectors) { 8751 sector_t sectors; 8752 8753 skipped = 0; 8754 8755 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8756 ((mddev->curr_resync > mddev->curr_resync_completed && 8757 (mddev->curr_resync - mddev->curr_resync_completed) 8758 > (max_sectors >> 4)) || 8759 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 8760 (j - mddev->curr_resync_completed)*2 8761 >= mddev->resync_max - mddev->curr_resync_completed || 8762 mddev->curr_resync_completed > mddev->resync_max 8763 )) { 8764 /* time to update curr_resync_completed */ 8765 wait_event(mddev->recovery_wait, 8766 atomic_read(&mddev->recovery_active) == 0); 8767 mddev->curr_resync_completed = j; 8768 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 8769 j > mddev->recovery_cp) 8770 mddev->recovery_cp = j; 8771 update_time = jiffies; 8772 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8773 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8774 } 8775 8776 while (j >= mddev->resync_max && 8777 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8778 /* As this condition is controlled by user-space, 8779 * we can block indefinitely, so use '_interruptible' 8780 * to avoid triggering warnings. 8781 */ 8782 flush_signals(current); /* just in case */ 8783 wait_event_interruptible(mddev->recovery_wait, 8784 mddev->resync_max > j 8785 || test_bit(MD_RECOVERY_INTR, 8786 &mddev->recovery)); 8787 } 8788 8789 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8790 break; 8791 8792 sectors = mddev->pers->sync_request(mddev, j, &skipped); 8793 if (sectors == 0) { 8794 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8795 break; 8796 } 8797 8798 if (!skipped) { /* actual IO requested */ 8799 io_sectors += sectors; 8800 atomic_add(sectors, &mddev->recovery_active); 8801 } 8802 8803 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8804 break; 8805 8806 j += sectors; 8807 if (j > max_sectors) 8808 /* when skipping, extra large numbers can be returned. 
 */
			j = max_sectors;
		if (j > 2)
			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		/*
		 * this loop exits only when we are slower than
		 * the 'hard' speed limit, or when the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
		/* currspeed is in KB/sec: (sectors/2) KB over elapsed seconds */
		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if (currspeed > speed_max(mddev)) {
				msleep(500);
				goto repeat;
			}
			if (!is_mddev_idle(mddev, 0)) {
				/*
				 * Give other IO more of a chance.
				 * The faster the devices, the less we wait.
				 */
				wait_event(mddev->recovery_wait,
					   !atomic_read(&mddev->recovery_active));
			}
		}
	}
	pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
		?
"interrupted" : "done"); 8870 /* 8871 * this also signals 'finished resyncing' to md_stop 8872 */ 8873 blk_finish_plug(&plug); 8874 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 8875 8876 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8877 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8878 mddev->curr_resync > 3) { 8879 mddev->curr_resync_completed = mddev->curr_resync; 8880 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8881 } 8882 mddev->pers->sync_request(mddev, max_sectors, &skipped); 8883 8884 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 8885 mddev->curr_resync > 3) { 8886 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8887 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8888 if (mddev->curr_resync >= mddev->recovery_cp) { 8889 pr_debug("md: checkpointing %s of %s.\n", 8890 desc, mdname(mddev)); 8891 if (test_bit(MD_RECOVERY_ERROR, 8892 &mddev->recovery)) 8893 mddev->recovery_cp = 8894 mddev->curr_resync_completed; 8895 else 8896 mddev->recovery_cp = 8897 mddev->curr_resync; 8898 } 8899 } else 8900 mddev->recovery_cp = MaxSector; 8901 } else { 8902 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8903 mddev->curr_resync = MaxSector; 8904 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8905 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 8906 rcu_read_lock(); 8907 rdev_for_each_rcu(rdev, mddev) 8908 if (rdev->raid_disk >= 0 && 8909 mddev->delta_disks >= 0 && 8910 !test_bit(Journal, &rdev->flags) && 8911 !test_bit(Faulty, &rdev->flags) && 8912 !test_bit(In_sync, &rdev->flags) && 8913 rdev->recovery_offset < mddev->curr_resync) 8914 rdev->recovery_offset = mddev->curr_resync; 8915 rcu_read_unlock(); 8916 } 8917 } 8918 } 8919 skip: 8920 /* set CHANGE_PENDING here since maybe another update is needed, 8921 * so other nodes are informed. It should be harmless for normal 8922 * raid */ 8923 set_mask_bits(&mddev->sb_flags, 0, 8924 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 8925 8926 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8927 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8928 mddev->delta_disks > 0 && 8929 mddev->pers->finish_reshape && 8930 mddev->pers->size && 8931 mddev->queue) { 8932 mddev_lock_nointr(mddev); 8933 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 8934 mddev_unlock(mddev); 8935 if (!mddev_is_clustered(mddev)) { 8936 set_capacity(mddev->gendisk, mddev->array_sectors); 8937 revalidate_disk(mddev->gendisk); 8938 } 8939 } 8940 8941 spin_lock(&mddev->lock); 8942 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8943 /* We completed so min/max setting can be forgotten if used. 
*/ 8944 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8945 mddev->resync_min = 0; 8946 mddev->resync_max = MaxSector; 8947 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8948 mddev->resync_min = mddev->curr_resync_completed; 8949 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 8950 mddev->curr_resync = 0; 8951 spin_unlock(&mddev->lock); 8952 8953 wake_up(&resync_wait); 8954 md_wakeup_thread(mddev->thread); 8955 return; 8956 } 8957 EXPORT_SYMBOL_GPL(md_do_sync); 8958 8959 static int remove_and_add_spares(struct mddev *mddev, 8960 struct md_rdev *this) 8961 { 8962 struct md_rdev *rdev; 8963 int spares = 0; 8964 int removed = 0; 8965 bool remove_some = false; 8966 8967 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 8968 /* Mustn't remove devices when resync thread is running */ 8969 return 0; 8970 8971 rdev_for_each(rdev, mddev) { 8972 if ((this == NULL || rdev == this) && 8973 rdev->raid_disk >= 0 && 8974 !test_bit(Blocked, &rdev->flags) && 8975 test_bit(Faulty, &rdev->flags) && 8976 atomic_read(&rdev->nr_pending)==0) { 8977 /* Faulty non-Blocked devices with nr_pending == 0 8978 * never get nr_pending incremented, 8979 * never get Faulty cleared, and never get Blocked set. 8980 * So we can synchronize_rcu now rather than once per device 8981 */ 8982 remove_some = true; 8983 set_bit(RemoveSynchronized, &rdev->flags); 8984 } 8985 } 8986 8987 if (remove_some) 8988 synchronize_rcu(); 8989 rdev_for_each(rdev, mddev) { 8990 if ((this == NULL || rdev == this) && 8991 rdev->raid_disk >= 0 && 8992 !test_bit(Blocked, &rdev->flags) && 8993 ((test_bit(RemoveSynchronized, &rdev->flags) || 8994 (!test_bit(In_sync, &rdev->flags) && 8995 !test_bit(Journal, &rdev->flags))) && 8996 atomic_read(&rdev->nr_pending)==0)) { 8997 if (mddev->pers->hot_remove_disk( 8998 mddev, rdev) == 0) { 8999 sysfs_unlink_rdev(mddev, rdev); 9000 rdev->saved_raid_disk = rdev->raid_disk; 9001 rdev->raid_disk = -1; 9002 removed++; 9003 } 9004 } 9005 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags)) 9006 clear_bit(RemoveSynchronized, &rdev->flags); 9007 } 9008 9009 if (removed && mddev->kobj.sd) 9010 sysfs_notify(&mddev->kobj, NULL, "degraded"); 9011 9012 if (this && removed) 9013 goto no_add; 9014 9015 rdev_for_each(rdev, mddev) { 9016 if (this && this != rdev) 9017 continue; 9018 if (test_bit(Candidate, &rdev->flags)) 9019 continue; 9020 if (rdev->raid_disk >= 0 && 9021 !test_bit(In_sync, &rdev->flags) && 9022 !test_bit(Journal, &rdev->flags) && 9023 !test_bit(Faulty, &rdev->flags)) 9024 spares++; 9025 if (rdev->raid_disk >= 0) 9026 continue; 9027 if (test_bit(Faulty, &rdev->flags)) 9028 continue; 9029 if (!test_bit(Journal, &rdev->flags)) { 9030 if (mddev->ro && 9031 ! 
			    (rdev->saved_raid_disk >= 0 &&
			     !test_bit(Bitmap_sync, &rdev->flags)))
				continue;

			rdev->recovery_offset = 0;
		}
		if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;
			if (!test_bit(Journal, &rdev->flags))
				spares++;
			md_new_event(mddev);
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
no_add:
	if (removed)
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	return spares;
}

static void md_start_sync(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	mddev->sync_thread = md_register_thread(md_do_sync,
						mddev,
						"resync");
	if (!mddev->sync_thread) {
		pr_warn("%s: could not start resync thread...\n",
			mdname(mddev));
		/* leave the spares where they are, it shouldn't hurt */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		wake_up(&resync_wait);
		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
				       &mddev->recovery))
			if (mddev->sysfs_action)
				sysfs_notify_dirent_safe(mddev->sysfs_action);
	} else
		md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
}
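/*
 * Illustrative sketch (not from the driver itself): md_check_recovery()
 * below is designed to be called by each personality's main daemon on
 * every loop iteration.  A hypothetical daemon (cf. raid1d/raid5d) would
 * look roughly like:
 *
 *	static void demo_daemon(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *
 *		md_check_recovery(mddev);	// superblock + resync management
 *		// ... then handle the personality's own pending IO ...
 *	}
 */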
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(struct mddev *mddev)
{
	if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
		/* Write superblock - thread that called mddev_suspend()
		 * holds reconfig_mutex for us.
		 */
		set_bit(MD_UPDATING_SB, &mddev->flags);
		smp_mb__after_atomic();
		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
			md_update_sb(mddev, 0);
		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
		wake_up(&mddev->sb_wait);
	}

	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		md_bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			pr_debug("md: %s in immediate safe mode\n",
				 mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;
		bool try_set_sync = mddev->safemode != 0;

		if (!mddev->external && mddev->safemode == 1)
			mddev->safemode = 0;

		if (mddev->ro) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)
				/* 'Blocked' flag not needed as failed devices
				 * will be recorded if array switched to read/write.
				 * Leaving it set will prevent the device
				 * from being removed.
				 */
				rdev_for_each(rdev, mddev)
					clear_bit(Blocked, &rdev->flags);
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			remove_and_add_spares(mddev, NULL);
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			goto unlock;
		}

		if (mddev_is_clustered(mddev)) {
			struct md_rdev *rdev;
			/* kick the device if another node issued a
			 * remove disk.
			 */
			rdev_for_each(rdev, mddev) {
				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
						rdev->raid_disk < 0)
					md_kick_rdev_from_array(rdev);
			}
		}

		if (try_set_sync && !mddev->external && !mddev->in_sync) {
			spin_lock(&mddev->lock);
			set_in_sync(mddev);
			spin_unlock(&mddev->lock);
		}

		if (mddev->sb_flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
9211 */ 9212 mddev->curr_resync_completed = 0; 9213 spin_lock(&mddev->lock); 9214 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9215 spin_unlock(&mddev->lock); 9216 /* Clear some bits that don't mean anything, but 9217 * might be left set 9218 */ 9219 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 9220 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9221 9222 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9223 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 9224 goto not_running; 9225 /* no recovery is running. 9226 * remove any failed drives, then 9227 * add spares if possible. 9228 * Spares are also removed and re-added, to allow 9229 * the personality to fail the re-add. 9230 */ 9231 9232 if (mddev->reshape_position != MaxSector) { 9233 if (mddev->pers->check_reshape == NULL || 9234 mddev->pers->check_reshape(mddev) != 0) 9235 /* Cannot proceed */ 9236 goto not_running; 9237 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9238 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9239 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 9240 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9241 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9242 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9243 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9244 } else if (mddev->recovery_cp < MaxSector) { 9245 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9246 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9247 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 9248 /* nothing to be done ... */ 9249 goto not_running; 9250 9251 if (mddev->pers->sync_request) { 9252 if (spares) { 9253 /* We are adding a device or devices to an array 9254 * which has the bitmap stored on all devices. 9255 * So make sure all bitmap pages get written 9256 */ 9257 md_bitmap_write_all(mddev->bitmap); 9258 } 9259 INIT_WORK(&mddev->del_work, md_start_sync); 9260 queue_work(md_misc_wq, &mddev->del_work); 9261 goto unlock; 9262 } 9263 not_running: 9264 if (!mddev->sync_thread) { 9265 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9266 wake_up(&resync_wait); 9267 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 9268 &mddev->recovery)) 9269 if (mddev->sysfs_action) 9270 sysfs_notify_dirent_safe(mddev->sysfs_action); 9271 } 9272 unlock: 9273 wake_up(&mddev->sb_wait); 9274 mddev_unlock(mddev); 9275 } 9276 } 9277 EXPORT_SYMBOL(md_check_recovery); 9278 9279 void md_reap_sync_thread(struct mddev *mddev) 9280 { 9281 struct md_rdev *rdev; 9282 sector_t old_dev_sectors = mddev->dev_sectors; 9283 bool is_reshaped = false; 9284 9285 /* resync has finished, collect result */ 9286 md_unregister_thread(&mddev->sync_thread); 9287 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9288 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 9289 mddev->degraded != mddev->raid_disks) { 9290 /* success...*/ 9291 /* activate any spares */ 9292 if (mddev->pers->spare_active(mddev)) { 9293 sysfs_notify(&mddev->kobj, NULL, 9294 "degraded"); 9295 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9296 } 9297 } 9298 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9299 mddev->pers->finish_reshape) { 9300 mddev->pers->finish_reshape(mddev); 9301 if (mddev_is_clustered(mddev)) 9302 is_reshaped = true; 9303 } 9304 9305 /* If array is no-longer degraded, then any saved_raid_disk 9306 * information must be scrapped. 
 */
	if (!mddev->degraded)
		rdev_for_each(rdev, mddev)
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
	 * clustered raid */
	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
		md_cluster_ops->resync_finish(mddev);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	/*
	 * We call md_cluster_ops->update_size here because sync_size could
	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
	 * so it is time to update size across cluster.
	 */
	if (mddev_is_clustered(mddev) && is_reshaped
				      && !test_bit(MD_CLOSING, &mddev->flags))
		md_cluster_ops->update_size(mddev, old_dev_sectors);
	wake_up(&resync_wait);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
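/*
 * Illustrative sketch (not from the driver itself): how a personality's
 * write path typically uses md_wait_for_blocked_rdev().  Note that the
 * helper drops the nr_pending reference the caller takes (hypothetical
 * shape, cf. the raid1/raid10 retry loops):
 *
 *	if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
 *		atomic_inc(&rdev->nr_pending);		// reference for the wait
 *		md_wait_for_blocked_rdev(rdev, mddev);	// also drops it
 *		goto retry_write;
 *	}
 */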
void md_finish_reshape(struct mddev *mddev)
{
	/* called by the personality module when reshape completes. */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management */

/* Returns 1 on success, 0 on failure */
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	struct mddev *mddev = rdev->mddev;
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
	if (rv == 0) {
		/* Make sure they get written out promptly */
		if (test_bit(ExternalBbl, &rdev->flags))
			sysfs_notify(&rdev->kobj, NULL,
				     "unacknowledged_bad_blocks");
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(rdev->mddev->thread);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_clear(&rdev->badblocks, s, sectors);
	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
		sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
	if (!md_rdev_misc_wq)
		goto err_rdev_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_rdev_misc_wq);
err_rdev_misc_wq:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}
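/*
 * Illustrative note (not from the driver itself): check_sb_changes() below
 * runs on a cluster node when another node signals a metadata change (the
 * md-cluster module re-reads the superblock and hands it here via
 * md_reload_sb()).  The magic role values it compares against correspond
 * to the MD_DISK_ROLE_* constants from md_p.h:
 *
 *	0xffff	// MD_DISK_ROLE_SPARE
 *	0xfffe	// MD_DISK_ROLE_FAULTY
 *	0xfffd	// MD_DISK_ROLE_JOURNAL
 */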
static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	struct md_rdev *rdev2;
	int role, ret;
	char b[BDEVNAME_SIZE];

	/*
	 * If size is changed in another node then we need to
	 * do resize as well.
	 */
	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
		if (ret)
			pr_info("md-cluster: resize failed\n");
		else
			md_bitmap_update_sb(mddev->bitmap);
	}

	/* Check for change of roles in the active devices */
	rdev_for_each(rdev2, mddev) {
		if (test_bit(Faulty, &rdev2->flags))
			continue;

		/* Check if the roles changed */
		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);

		if (test_bit(Candidate, &rdev2->flags)) {
			if (role == 0xfffe) {
				pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
				md_kick_rdev_from_array(rdev2);
				continue;
			}
			else
				clear_bit(Candidate, &rdev2->flags);
		}

		if (role != rdev2->raid_disk) {
			/*
			 * The device got activated, unless a reshape is
			 * happening.
			 */
			if (rdev2->raid_disk == -1 && role != 0xffff &&
			    !(le32_to_cpu(sb->feature_map) &
			      MD_FEATURE_RESHAPE_ACTIVE)) {
				rdev2->saved_raid_disk = role;
				ret = remove_and_add_spares(mddev, rdev2);
				pr_info("Activated spare: %s\n",
					bdevname(rdev2->bdev,b));
				/* wakeup mddev->thread here, so array could
				 * perform resync with the new activated disk */
				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
				md_wakeup_thread(mddev->thread);
			}
			/* device faulty
			 * We just want to do the minimum to mark the disk
			 * as faulty. The recovery is performed by the
			 * one who initiated the error.
			 */
			if ((role == 0xfffe) || (role == 0xfffd)) {
				md_error(mddev, rdev2);
				clear_bit(Blocked, &rdev2->flags);
			}
		}
	}

	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));

	/*
	 * Since mddev->delta_disks has already been updated in
	 * update_raid_disks, it is time to check reshape.
	 */
	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/*
		 * reshape is happening in the remote node, we need to
		 * update reshape_position and call start_reshape.
		 */
		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
		if (mddev->pers->start_reshape)
			mddev->pers->start_reshape(mddev);
	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
		   mddev->reshape_position != MaxSector &&
		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/* reshape has just finished on another node. */
		mddev->reshape_position = MaxSector;
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
	}

	/* Finally set the event to be up to date */
	mddev->events = le64_to_cpu(sb->events);
}
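/*
 * Illustrative note (not from the driver itself): read_rdev() below uses a
 * save-and-restore idiom so that a failed superblock reload leaves the rdev
 * usable.  In outline (hypothetical shape of the same pattern):
 *
 *	old = rdev->sb_page;		// stash the known-good page
 *	rdev->sb_page = NULL;
 *	if (reload_fails) {
 *		rdev->sb_page = old;	// roll back to the old superblock
 *		return err;
 *	}
 *	put_page(old);			// success: drop the stale copy
 */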
static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	int err;
	struct page *swapout = rdev->sb_page;
	struct mdp_superblock_1 *sb;

	/* Store the sb page of the rdev in the swapout temporary
	 * variable in case we err in the future
	 */
	rdev->sb_page = NULL;
	err = alloc_disk_sb(rdev);
	if (err == 0) {
		ClearPageUptodate(rdev->sb_page);
		rdev->sb_loaded = 0;
		err = super_types[mddev->major_version].
			load_super(rdev, NULL, mddev->minor_version);
	}
	if (err < 0) {
		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
			__func__, __LINE__, rdev->desc_nr, err);
		if (rdev->sb_page)
			put_page(rdev->sb_page);
		rdev->sb_page = swapout;
		rdev->sb_loaded = 1;
		return err;
	}

	sb = page_address(rdev->sb_page);
	/* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
	 * is not set
	 */

	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

	/* The other node finished recovery, call spare_active to set
	 * device In_sync and mddev->degraded
	 */
	if (rdev->recovery_offset == MaxSector &&
	    !test_bit(In_sync, &rdev->flags) &&
	    mddev->pers->spare_active(mddev))
		sysfs_notify(&mddev->kobj, NULL, "degraded");

	put_page(swapout);
	return 0;
}

void md_reload_sb(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;
	int err;

	/* Find the rdev */
	rdev_for_each_rcu(rdev, mddev) {
		if (rdev->desc_nr == nr)
			break;
	}

	if (!rdev || rdev->desc_nr != nr) {
		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
		return;
	}

	err = read_rdev(mddev, rdev);
	if (err < 0)
		return;

	check_sb_changes(mddev, rdev);

	/* Read all rdevs to update recovery_offset */
	rdev_for_each_rcu(rdev, mddev) {
		if (!test_bit(Faulty, &rdev->flags))
			read_rdev(mddev, rdev);
	}
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		mutex_lock(&detected_devices_mutex);
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
		mutex_unlock(&detected_devices_mutex);
	}
}

static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	pr_info("md: Autodetecting RAID arrays.\n");

	mutex_lock(&detected_devices_mutex);
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		mutex_unlock(&detected_devices_mutex);
		rdev = md_import_device(dev,0, 90);
		mutex_lock(&detected_devices_mutex);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}
	mutex_unlock(&detected_devices_mutex);

	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head
*tmp; 9745 int delay = 1; 9746 9747 blk_unregister_region(MKDEV(MD_MAJOR,0), 512); 9748 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 9749 9750 unregister_blkdev(MD_MAJOR,"md"); 9751 unregister_blkdev(mdp_major, "mdp"); 9752 unregister_reboot_notifier(&md_notifier); 9753 unregister_sysctl_table(raid_table_header); 9754 9755 /* We cannot unload the modules while some process is 9756 * waiting for us in select() or poll() - wake them up 9757 */ 9758 md_unloading = 1; 9759 while (waitqueue_active(&md_event_waiters)) { 9760 /* not safe to leave yet */ 9761 wake_up(&md_event_waiters); 9762 msleep(delay); 9763 delay += delay; 9764 } 9765 remove_proc_entry("mdstat", NULL); 9766 9767 for_each_mddev(mddev, tmp) { 9768 export_array(mddev); 9769 mddev->ctime = 0; 9770 mddev->hold_active = 0; 9771 /* 9772 * for_each_mddev() will call mddev_put() at the end of each 9773 * iteration. As the mddev is now fully clear, this will 9774 * schedule the mddev for destruction by a workqueue, and the 9775 * destroy_workqueue() below will wait for that to complete. 9776 */ 9777 } 9778 destroy_workqueue(md_rdev_misc_wq); 9779 destroy_workqueue(md_misc_wq); 9780 destroy_workqueue(md_wq); 9781 } 9782 9783 subsys_initcall(md_init); 9784 module_exit(md_exit) 9785 9786 static int get_ro(char *buffer, const struct kernel_param *kp) 9787 { 9788 return sprintf(buffer, "%d\n", start_readonly); 9789 } 9790 static int set_ro(const char *val, const struct kernel_param *kp) 9791 { 9792 return kstrtouint(val, 10, (unsigned int *)&start_readonly); 9793 } 9794 9795 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 9796 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 9797 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 9798 module_param(create_on_open, bool, S_IRUSR|S_IWUSR); 9799 9800 MODULE_LICENSE("GPL"); 9801 MODULE_DESCRIPTION("MD RAID framework"); 9802 MODULE_ALIAS("md"); 9803 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 9804