// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
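/*
 * Illustrative example (not part of the driver): the per-array sysfs
 * knobs override the global sysctl only while set non-zero, so
 *
 *	# echo 50000 > /sys/block/md0/md/sync_speed_min
 *
 * makes speed_min() return 50000 for md0, while every other array keeps
 * using sysctl_speed_limit_min.
 */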
static int rdev_init_serial(struct md_rdev *rdev)
{
	if (rdev->bdev->bd_queue->nr_hw_queues == 1)
		return 0;

	spin_lock_init(&rdev->serial_list_lock);
	INIT_LIST_HEAD(&rdev->serial_list);
	init_waitqueue_head(&rdev->serial_io_wait);
	set_bit(CollisionCheck, &rdev->flags);

	return 1;
}

/*
 * Create serial_info_pool if rdev is the first multi-queue device flagged
 * with WriteMostly while write-behind mode is enabled.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			      bool is_suspend)
{
	if (mddev->bitmap_info.max_write_behind == 0)
		return;

	if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_serial(rdev))
		return;

	if (mddev->serial_info_pool == NULL) {
		unsigned int noio_flag;

		if (!is_suspend)
			mddev_suspend(mddev);
		noio_flag = memalloc_noio_save();
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		memalloc_noio_restore(noio_flag);
		if (!mddev->serial_info_pool)
			pr_err("can't alloc memory pool for serialization\n");
		if (!is_suspend)
			mddev_resume(mddev);
	}
}
EXPORT_SYMBOL_GPL(mddev_create_serial_pool);

/*
 * Destroy serial_info_pool if rdev is the last device flagged with
 * CollisionCheck.
 */
static void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0;

		/*
		 * Check if other rdevs still need serial_info_pool.
		 */
		rdev_for_each(temp, mddev)
			if (temp != rdev &&
			    test_bit(CollisionCheck, &temp->flags))
				num++;
		if (!num) {
			mddev_suspend(rdev->mddev);
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
			mddev_resume(rdev->mddev);
		}
	}
}
static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it. This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->bio_set))
		return bio_alloc(gfp_mask, nr_iovecs);

	return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->sync_set))
		return bio_alloc(GFP_NOIO, 1);

	return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
}

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
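/*
 * A minimal usage sketch (illustrative only): counting the registered
 * arrays with the iterator above. The body may sleep, because no lock is
 * held while it runs; the refcounting built into the macro keeps the
 * current mddev alive between iterations.
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *	int count = 0;
 *
 *	for_each_mddev(mddev, tmp)
 *		count++;
 *
 * Breaking out early leaves a reference held on the current mddev, which
 * the caller must then drop with mddev_put(mddev).
 */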
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);
static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	const int sgrp = op_stat_group(bio_op(bio));
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(q, &bio);

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);

	part_stat_lock();
	part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
	part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
	part_stat_unlock();

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
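/*
 * Typical usage sketch (illustrative): the two calls nest, so a caller
 * that needs the array quiescent around a reconfiguration does, with
 * reconfig_mutex held:
 *
 *	mddev_suspend(mddev);
 *	... change state that in-flight IO must not observe ...
 *	mddev_resume(mddev);
 *
 * Because ->suspended is a counter, a second mddev_suspend() from an
 * inner path is cheap, and only the matching outermost mddev_resume()
 * restarts IO.
 */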
int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, one after
			 * we release the rcu_read_lock.
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * flush_bio must be reset before calling into md_handle_request,
	 * to avoid a deadlock: other bios that already passed the suspend
	 * check in md_handle_request could be waiting for this flush,
	 * while the md_handle_request call below could in turn be waiting
	 * for those bios because of that same suspend check.
	 */
	mddev->last_flush = mddev->start_flush;
	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}
/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
 * being finished in another context. Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_after(mddev->last_flush, start),
			    mddev->lock);
	if (!ktime_after(mddev->last_flush, start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
	}
	spin_unlock_irq(&mddev->lock);

	if (!bio) {
		INIT_WORK(&mddev->flush_work, submit_flushes);
		queue_work(md_wq, &mddev->flush_work);
	} else {
		/* flush was performed for some other bio while we waited. */
		if (bio->bi_iter.bi_size == 0)
			/* an empty barrier - all done */
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(md_flush_request);
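/*
 * Caller-side sketch (illustrative; "example_make_request" is a made-up
 * personality hook, not a function in this file): a ->make_request()
 * implementation is expected to use the return value like this:
 *
 *	static bool example_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *		    && md_flush_request(mddev, bio))
 *			return true;	// bio finished, nothing left to do
 *		// ... map and submit the data portion of the bio ...
 *		return true;
 *	}
 *
 * A false return means the flush itself is already covered and only the
 * data payload still needs to be issued.
 */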
static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	kobject_init(&mddev->kobj, &md_ktype);
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}
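/*
 * Worked example (illustrative; MdpMinorShift comes from md.h and is 6
 * at the time of writing): each partitionable "mdp" array owns a block
 * of 1 << MdpMinorShift minors, one per partition. An open of partition
 * 3 of the first mdp array (minor 3) masks to minor 0 above, so all
 * partitions of one array resolve to the same mddev.
 */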
static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So set sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
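/*
 * Worked example (illustrative; MD_NEW_SIZE_SECTORS comes from the raid
 * uapi headers and reserves 64KiB == 128 sectors at the end of the
 * device, rounded down to a 64KiB boundary): for a 100000-sector device,
 *
 *	100000 & ~127 = 99968;  99968 - 128 = 99840
 *
 * so the 0.90 superblock sits at sector 99840, which is also where the
 * usable data area ends.
 */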
static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: super_written gets error=%d\n", bio->bi_status);
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = md_bio_alloc_sync(mddev);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
	int ret;

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(bio, rdev->meta_bdev);
	else
		bio_set_dev(bio, rdev->bdev);
	bio_set_op_attrs(bio, op, op_flags);
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(bio);

	ret = !bio->bi_status;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
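/*
 * Usage sketch (illustrative): synchronously reading one page of
 * metadata relative to the superblock, as the superblock loaders below
 * do:
 *
 *	if (!sync_page_io(rdev, 0, PAGE_SIZE, rdev->sb_page,
 *			  REQ_OP_READ, 0, true))
 *		return -EIO;
 *
 * With metadata_op == true the sector is offset by rdev->sb_start; data
 * reads instead use rdev->data_offset (or new_data_offset mid-reshape).
 * The return value is 1 on success and 0 on IO error.
 */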
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
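/*
 * Worked example (illustrative): folding 0xffff0001 -
 * first pass:  0x0001 + 0xffff = 0x10000;
 * second pass: 0x0000 + 0x0001 = 0x0001.
 * Two passes are needed because the first addition can itself carry
 * past 16 bits, as it does here.
 */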
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in. Subsequent calls check that dev
 *       is new enough. Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */
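/*
 * Assembly sketch (illustrative, not a function in this file): the
 * return convention above lets a caller scan candidate devices and keep
 * the one with the freshest superblock as the reference. "candidates",
 * "ver" and "minor" are placeholders for the caller's state:
 *
 *	struct md_rdev *refdev = NULL, *rdev;
 *	int err;
 *
 *	list_for_each_entry(rdev, &candidates, same_set) {
 *		err = super_types[ver].load_super(rdev, refdev, minor);
 *		if (err < 0)
 *			continue;	// incompatible or unreadable
 *		if (err == 1)
 *			refdev = rdev;	// newer: becomes the reference
 *	}
 */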
struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	pr_warn("%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	bool spare_disk = true;

	/*
	 * Calculate the position of the superblock (512-byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		pr_warn("md: invalid raid superblock magic on %s\n", b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		pr_warn("Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version, b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		pr_warn("md: invalid superblock checksum on %s\n", b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	/* not spare disk, or LEVEL_MULTIPATH */
	if (sb->level == LEVEL_MULTIPATH ||
	    (rdev->desc_nr >= 0 &&
	     rdev->desc_nr < MD_SB_DISKS &&
	     sb->disks[rdev->desc_nr].state &
	     ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
		spare_disk = false;

	if (!refdev) {
		if (!spare_disk)
			ret = 1;
		else
			ret = 0;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!md_uuid_equal(refsb, sb)) {
			pr_warn("md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!md_sb_equal(refsb, sb)) {
			pr_warn("md: %s has same UUID but different superblock to %s\n",
				b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);

		if (!spare_disk && ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (sector_t)(2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

abort:
	return ret;
}
1358 */ 1359 int i; 1360 int active=0, working=0,failed=0,spare=0,nr_disks=0; 1361 1362 rdev->sb_size = MD_SB_BYTES; 1363 1364 sb = page_address(rdev->sb_page); 1365 1366 memset(sb, 0, sizeof(*sb)); 1367 1368 sb->md_magic = MD_SB_MAGIC; 1369 sb->major_version = mddev->major_version; 1370 sb->patch_version = mddev->patch_version; 1371 sb->gvalid_words = 0; /* ignored */ 1372 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 1373 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 1374 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 1375 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 1376 1377 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 1378 sb->level = mddev->level; 1379 sb->size = mddev->dev_sectors / 2; 1380 sb->raid_disks = mddev->raid_disks; 1381 sb->md_minor = mddev->md_minor; 1382 sb->not_persistent = 0; 1383 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 1384 sb->state = 0; 1385 sb->events_hi = (mddev->events>>32); 1386 sb->events_lo = (u32)mddev->events; 1387 1388 if (mddev->reshape_position == MaxSector) 1389 sb->minor_version = 90; 1390 else { 1391 sb->minor_version = 91; 1392 sb->reshape_position = mddev->reshape_position; 1393 sb->new_level = mddev->new_level; 1394 sb->delta_disks = mddev->delta_disks; 1395 sb->new_layout = mddev->new_layout; 1396 sb->new_chunk = mddev->new_chunk_sectors << 9; 1397 } 1398 mddev->minor_version = sb->minor_version; 1399 if (mddev->in_sync) 1400 { 1401 sb->recovery_cp = mddev->recovery_cp; 1402 sb->cp_events_hi = (mddev->events>>32); 1403 sb->cp_events_lo = (u32)mddev->events; 1404 if (mddev->recovery_cp == MaxSector) 1405 sb->state = (1<< MD_SB_CLEAN); 1406 } else 1407 sb->recovery_cp = 0; 1408 1409 sb->layout = mddev->layout; 1410 sb->chunk_size = mddev->chunk_sectors << 9; 1411 1412 if (mddev->bitmap && mddev->bitmap_info.file == NULL) 1413 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1414 1415 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1416 rdev_for_each(rdev2, mddev) { 1417 mdp_disk_t *d; 1418 int desc_nr; 1419 int is_active = test_bit(In_sync, &rdev2->flags); 1420 1421 if (rdev2->raid_disk >= 0 && 1422 sb->minor_version >= 91) 1423 /* we have nowhere to store the recovery_offset, 1424 * but if it is not below the reshape_position, 1425 * we can piggy-back on that. 
1426 */ 1427 is_active = 1; 1428 if (rdev2->raid_disk < 0 || 1429 test_bit(Faulty, &rdev2->flags)) 1430 is_active = 0; 1431 if (is_active) 1432 desc_nr = rdev2->raid_disk; 1433 else 1434 desc_nr = next_spare++; 1435 rdev2->desc_nr = desc_nr; 1436 d = &sb->disks[rdev2->desc_nr]; 1437 nr_disks++; 1438 d->number = rdev2->desc_nr; 1439 d->major = MAJOR(rdev2->bdev->bd_dev); 1440 d->minor = MINOR(rdev2->bdev->bd_dev); 1441 if (is_active) 1442 d->raid_disk = rdev2->raid_disk; 1443 else 1444 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1445 if (test_bit(Faulty, &rdev2->flags)) 1446 d->state = (1<<MD_DISK_FAULTY); 1447 else if (is_active) { 1448 d->state = (1<<MD_DISK_ACTIVE); 1449 if (test_bit(In_sync, &rdev2->flags)) 1450 d->state |= (1<<MD_DISK_SYNC); 1451 active++; 1452 working++; 1453 } else { 1454 d->state = 0; 1455 spare++; 1456 working++; 1457 } 1458 if (test_bit(WriteMostly, &rdev2->flags)) 1459 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1460 if (test_bit(FailFast, &rdev2->flags)) 1461 d->state |= (1<<MD_DISK_FAILFAST); 1462 } 1463 /* now set the "removed" and "faulty" bits on any missing devices */ 1464 for (i=0 ; i < mddev->raid_disks ; i++) { 1465 mdp_disk_t *d = &sb->disks[i]; 1466 if (d->state == 0 && d->number == 0) { 1467 d->number = i; 1468 d->raid_disk = i; 1469 d->state = (1<<MD_DISK_REMOVED); 1470 d->state |= (1<<MD_DISK_FAULTY); 1471 failed++; 1472 } 1473 } 1474 sb->nr_disks = nr_disks; 1475 sb->active_disks = active; 1476 sb->working_disks = working; 1477 sb->failed_disks = failed; 1478 sb->spare_disks = spare; 1479 1480 sb->this_disk = sb->disks[rdev->desc_nr]; 1481 sb->sb_csum = calc_sb_csum(sb); 1482 } 1483 1484 /* 1485 * rdev_size_change for 0.90.0 1486 */ 1487 static unsigned long long 1488 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1489 { 1490 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1491 return 0; /* component must fit device */ 1492 if (rdev->mddev->bitmap_info.offset) 1493 return 0; /* can't move bitmap */ 1494 rdev->sb_start = calc_dev_sboffset(rdev); 1495 if (!num_sectors || num_sectors > rdev->sb_start) 1496 num_sectors = rdev->sb_start; 1497 /* Limit to 4TB as metadata cannot record more than that. 1498 * 4TB == 2^32 KB, or 2*2^32 sectors. 
1499 */ 1500 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1501 num_sectors = (sector_t)(2ULL << 32) - 2; 1502 do { 1503 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1504 rdev->sb_page); 1505 } while (md_super_wait(rdev->mddev) < 0); 1506 return num_sectors; 1507 } 1508 1509 static int 1510 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1511 { 1512 /* non-zero offset changes not possible with v0.90 */ 1513 return new_offset == 0; 1514 } 1515 1516 /* 1517 * version 1 superblock 1518 */ 1519 1520 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) 1521 { 1522 __le32 disk_csum; 1523 u32 csum; 1524 unsigned long long newcsum; 1525 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1526 __le32 *isuper = (__le32*)sb; 1527 1528 disk_csum = sb->sb_csum; 1529 sb->sb_csum = 0; 1530 newcsum = 0; 1531 for (; size >= 4; size -= 4) 1532 newcsum += le32_to_cpu(*isuper++); 1533 1534 if (size == 2) 1535 newcsum += le16_to_cpu(*(__le16*) isuper); 1536 1537 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1538 sb->sb_csum = disk_csum; 1539 return cpu_to_le32(csum); 1540 } 1541 1542 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1543 { 1544 struct mdp_superblock_1 *sb; 1545 int ret; 1546 sector_t sb_start; 1547 sector_t sectors; 1548 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1549 int bmask; 1550 bool spare_disk = true; 1551 1552 /* 1553 * Calculate the position of the superblock in 512byte sectors. 1554 * It is always aligned to a 4K boundary and 1555 * depeding on minor_version, it can be: 1556 * 0: At least 8K, but less than 12K, from end of device 1557 * 1: At start of device 1558 * 2: 4K from start of device. 1559 */ 1560 switch(minor_version) { 1561 case 0: 1562 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; 1563 sb_start -= 8*2; 1564 sb_start &= ~(sector_t)(4*2-1); 1565 break; 1566 case 1: 1567 sb_start = 0; 1568 break; 1569 case 2: 1570 sb_start = 8; 1571 break; 1572 default: 1573 return -EINVAL; 1574 } 1575 rdev->sb_start = sb_start; 1576 1577 /* superblock is rarely larger than 1K, but it can be larger, 1578 * and it is safe to read 4k, so we do that 1579 */ 1580 ret = read_disk_sb(rdev, 4096); 1581 if (ret) return ret; 1582 1583 sb = page_address(rdev->sb_page); 1584 1585 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1586 sb->major_version != cpu_to_le32(1) || 1587 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1588 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1589 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1590 return -EINVAL; 1591 1592 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1593 pr_warn("md: invalid superblock checksum on %s\n", 1594 bdevname(rdev->bdev,b)); 1595 return -EINVAL; 1596 } 1597 if (le64_to_cpu(sb->data_size) < 10) { 1598 pr_warn("md: data_size too small on %s\n", 1599 bdevname(rdev->bdev,b)); 1600 return -EINVAL; 1601 } 1602 if (sb->pad0 || 1603 sb->pad3[0] || 1604 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) 1605 /* Some padding is non-zero, might be a new feature */ 1606 return -EINVAL; 1607 1608 rdev->preferred_minor = 0xffff; 1609 rdev->data_offset = le64_to_cpu(sb->data_offset); 1610 rdev->new_data_offset = rdev->data_offset; 1611 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1612 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1613 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1614 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1615 1616 rdev->sb_size = 
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;
	bool spare_disk = true;

	/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		pr_warn("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		pr_warn("md: data_size too small on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		__le64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, REQ_OP_READ, 0, true))
			return -EIO;
		bbp = (__le64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (badblocks_set(&rdev->badblocks, sector, count, 1))
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if ((le32_to_cpu(sb->feature_map) &
	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
		rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
		rdev->ppl.size = le16_to_cpu(sb->ppl.size);
		rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
	}

	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
	    sb->level != 0)
		return -EINVAL;

	/* not spare disk, or LEVEL_MULTIPATH */
	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
	    (rdev->desc_nr >= 0 &&
	     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
	     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
	      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
		spare_disk = false;

	if (!refdev) {
		if (!spare_disk)
			ret = 1;
		else
			ret = 0;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			pr_warn("md: %s has strangely different superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (!spare_disk && ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}
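/*
 * Bad-block encoding example (illustrative): each on-disk entry read by
 * super_1_load() above is a little-endian u64 packing a 54-bit start
 * sector and a 10-bit length, i.e. (sector << 10) | count. An entry of
 * 0x1403 decodes as count = 0x1403 & 0x3ff = 3 and sector =
 * 0x1403 >> 10 = 5: three bad sectors starting at sector 5 (before any
 * bblog_shift scaling). An all-ones entry (bb + 1 == 0) terminates the
 * list.
 */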
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime);
		mddev->utime = le64_to_cpu(sb->utime);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (mddev->level == 0 &&
		    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
			mddev->layout = -1;

		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
			set_bit(MD_HAS_JOURNAL, &mddev->flags);

		if (le32_to_cpu(sb->feature_map) &
		    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
			if (le32_to_cpu(sb->feature_map) &
			    (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
				return -EINVAL;
			if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
			    (le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_MULTIPLE_PPLS))
				return -EINVAL;
			set_bit(MD_HAS_PPL, &mddev->flags);
		}
	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = MD_DISK_ROLE_SPARE;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case MD_DISK_ROLE_SPARE: /* spare */
			break;
		case MD_DISK_ROLE_FAULTY: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		case MD_DISK_ROLE_JOURNAL: /* journal device */
			if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
				/* journal device without journal feature */
				pr_warn("md: journal device provided without journal feature, ignoring the device\n");
				return -EINVAL;
			}
			set_bit(Journal, &rdev->flags);
			rdev->journal_tail = le64_to_cpu(sb->journal_tail);
			rdev->raid_disk = 0;
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else {
				/*
				 * If the array is FROZEN, then the device can't
				 * be in_sync with rest of array.
				 */
				if (!test_bit(MD_RECOVERY_FROZEN,
					      &mddev->recovery))
					set_bit(In_sync, &rdev->flags);
			}
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (sb->devflags & FailFast1)
			set_bit(FailFast, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
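/*
 * Role decoding example (illustrative): dev_roles[] entries are
 * little-endian u16 values where 0xffff (MD_DISK_ROLE_SPARE) marks a
 * spare, 0xfffe (MD_DISK_ROLE_FAULTY) a failed device and 0xfffd
 * (MD_DISK_ROLE_JOURNAL) a journal device; anything below
 * MD_DISK_ROLE_MAX is the raid_disk slot the device occupies, e.g.
 * dev_roles[2] == 0x0005 puts the device with desc_nr 2 in slot 5.
 */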
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
		sb->resync_offset = cpu_to_le64(MaxSector);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	if (test_bit(FailFast, &rdev->flags))
		sb->devflags |= FailFast1;
	else
		sb->devflags &= ~FailFast1;

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
	}
	/* Note: recovery_offset and journal_tail share space */
	if (test_bit(Journal, &rdev->flags))
		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (mddev_is_clustered(mddev))
		sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = p[i];
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				bbp[i] = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}
*/ 1908 1909 sb = page_address(rdev->sb_page); 1910 1911 sb->feature_map = 0; 1912 sb->pad0 = 0; 1913 sb->recovery_offset = cpu_to_le64(0); 1914 memset(sb->pad3, 0, sizeof(sb->pad3)); 1915 1916 sb->utime = cpu_to_le64((__u64)mddev->utime); 1917 sb->events = cpu_to_le64(mddev->events); 1918 if (mddev->in_sync) 1919 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1920 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 1921 sb->resync_offset = cpu_to_le64(MaxSector); 1922 else 1923 sb->resync_offset = cpu_to_le64(0); 1924 1925 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1926 1927 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1928 sb->size = cpu_to_le64(mddev->dev_sectors); 1929 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1930 sb->level = cpu_to_le32(mddev->level); 1931 sb->layout = cpu_to_le32(mddev->layout); 1932 if (test_bit(FailFast, &rdev->flags)) 1933 sb->devflags |= FailFast1; 1934 else 1935 sb->devflags &= ~FailFast1; 1936 1937 if (test_bit(WriteMostly, &rdev->flags)) 1938 sb->devflags |= WriteMostly1; 1939 else 1940 sb->devflags &= ~WriteMostly1; 1941 sb->data_offset = cpu_to_le64(rdev->data_offset); 1942 sb->data_size = cpu_to_le64(rdev->sectors); 1943 1944 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1945 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1946 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1947 } 1948 1949 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && 1950 !test_bit(In_sync, &rdev->flags)) { 1951 sb->feature_map |= 1952 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1953 sb->recovery_offset = 1954 cpu_to_le64(rdev->recovery_offset); 1955 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 1956 sb->feature_map |= 1957 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 1958 } 1959 /* Note: recovery_offset and journal_tail share space */ 1960 if (test_bit(Journal, &rdev->flags)) 1961 sb->journal_tail = cpu_to_le64(rdev->journal_tail); 1962 if (test_bit(Replacement, &rdev->flags)) 1963 sb->feature_map |= 1964 cpu_to_le32(MD_FEATURE_REPLACEMENT); 1965 1966 if (mddev->reshape_position != MaxSector) { 1967 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1968 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1969 sb->new_layout = cpu_to_le32(mddev->new_layout); 1970 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1971 sb->new_level = cpu_to_le32(mddev->new_level); 1972 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 1973 if (mddev->delta_disks == 0 && 1974 mddev->reshape_backwards) 1975 sb->feature_map 1976 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 1977 if (rdev->new_data_offset != rdev->data_offset) { 1978 sb->feature_map 1979 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 1980 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 1981 - rdev->data_offset)); 1982 } 1983 } 1984 1985 if (mddev_is_clustered(mddev)) 1986 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); 1987 1988 if (rdev->badblocks.count == 0) 1989 /* Nothing to do for bad blocks*/ ; 1990 else if (sb->bblog_offset == 0) 1991 /* Cannot record bad blocks on this device */ 1992 md_error(mddev, rdev); 1993 else { 1994 struct badblocks *bb = &rdev->badblocks; 1995 __le64 *bbp = (__le64 *)page_address(rdev->bb_page); 1996 u64 *p = bb->page; 1997 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 1998 if (bb->changed) { 1999 unsigned seq; 2000 2001 retry: 2002 seq = read_seqbegin(&bb->lock); 2003 2004 memset(bbp, 0xff, PAGE_SIZE); 2005 2006 for (i = 0 ; i < bb->count ; i++) { 2007 u64 
internal_bb = p[i]; 2008 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 2009 | BB_LEN(internal_bb)); 2010 bbp[i] = cpu_to_le64(store_bb); 2011 } 2012 bb->changed = 0; 2013 if (read_seqretry(&bb->lock, seq)) 2014 goto retry; 2015 2016 bb->sector = (rdev->sb_start + 2017 (int)le32_to_cpu(sb->bblog_offset)); 2018 bb->size = le16_to_cpu(sb->bblog_size); 2019 } 2020 } 2021 2022 max_dev = 0; 2023 rdev_for_each(rdev2, mddev) 2024 if (rdev2->desc_nr+1 > max_dev) 2025 max_dev = rdev2->desc_nr+1; 2026 2027 if (max_dev > le32_to_cpu(sb->max_dev)) { 2028 int bmask; 2029 sb->max_dev = cpu_to_le32(max_dev); 2030 rdev->sb_size = max_dev * 2 + 256; 2031 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 2032 if (rdev->sb_size & bmask) 2033 rdev->sb_size = (rdev->sb_size | bmask) + 1; 2034 } else 2035 max_dev = le32_to_cpu(sb->max_dev); 2036 2037 for (i=0; i<max_dev;i++) 2038 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2039 2040 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 2041 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 2042 2043 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 2044 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 2045 sb->feature_map |= 2046 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 2047 else 2048 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 2049 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 2050 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 2051 } 2052 2053 rdev_for_each(rdev2, mddev) { 2054 i = rdev2->desc_nr; 2055 if (test_bit(Faulty, &rdev2->flags)) 2056 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 2057 else if (test_bit(In_sync, &rdev2->flags)) 2058 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2059 else if (test_bit(Journal, &rdev2->flags)) 2060 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 2061 else if (rdev2->raid_disk >= 0) 2062 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2063 else 2064 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2065 } 2066 2067 sb->sb_csum = calc_sb_1_csum(sb); 2068 } 2069 2070 static unsigned long long 2071 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 2072 { 2073 struct mdp_superblock_1 *sb; 2074 sector_t max_sectors; 2075 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 2076 return 0; /* component must fit device */ 2077 if (rdev->data_offset != rdev->new_data_offset) 2078 return 0; /* too confusing */ 2079 if (rdev->sb_start < rdev->data_offset) { 2080 /* minor versions 1 and 2; superblock before data */ 2081 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; 2082 max_sectors -= rdev->data_offset; 2083 if (!num_sectors || num_sectors > max_sectors) 2084 num_sectors = max_sectors; 2085 } else if (rdev->mddev->bitmap_info.offset) { 2086 /* minor version 0 with bitmap we can't move */ 2087 return 0; 2088 } else { 2089 /* minor version 0; superblock after data */ 2090 sector_t sb_start; 2091 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; 2092 sb_start &= ~(sector_t)(4*2 - 1); 2093 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 2094 if (!num_sectors || num_sectors > max_sectors) 2095 num_sectors = max_sectors; 2096 rdev->sb_start = sb_start; 2097 } 2098 sb = page_address(rdev->sb_page); 2099 sb->data_size = cpu_to_le64(num_sectors); 2100 sb->super_offset = cpu_to_le64(rdev->sb_start); 2101 sb->sb_csum = calc_sb_1_csum(sb); 2102 do { 2103 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 2104 rdev->sb_page); 2105 } while (md_super_wait(rdev->mddev) < 0); 2106 return num_sectors; 2107 2108 } 2109 2110 static int 2111 
super_1_allow_new_offset(struct md_rdev *rdev, 2112 unsigned long long new_offset) 2113 { 2114 /* All necessary checks on new >= old have been done */ 2115 struct bitmap *bitmap; 2116 if (new_offset >= rdev->data_offset) 2117 return 1; 2118 2119 /* with 1.0 metadata, there is no metadata to tread on 2120 * so we can always move back */ 2121 if (rdev->mddev->minor_version == 0) 2122 return 1; 2123 2124 /* otherwise we must be sure not to step on 2125 * any metadata, so stay: 2126 * 36K beyond start of superblock 2127 * beyond end of badblocks 2128 * beyond write-intent bitmap 2129 */ 2130 if (rdev->sb_start + (32+4)*2 > new_offset) 2131 return 0; 2132 bitmap = rdev->mddev->bitmap; 2133 if (bitmap && !rdev->mddev->bitmap_info.file && 2134 rdev->sb_start + rdev->mddev->bitmap_info.offset + 2135 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) 2136 return 0; 2137 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2138 return 0; 2139 2140 return 1; 2141 } 2142 2143 static struct super_type super_types[] = { 2144 [0] = { 2145 .name = "0.90.0", 2146 .owner = THIS_MODULE, 2147 .load_super = super_90_load, 2148 .validate_super = super_90_validate, 2149 .sync_super = super_90_sync, 2150 .rdev_size_change = super_90_rdev_size_change, 2151 .allow_new_offset = super_90_allow_new_offset, 2152 }, 2153 [1] = { 2154 .name = "md-1", 2155 .owner = THIS_MODULE, 2156 .load_super = super_1_load, 2157 .validate_super = super_1_validate, 2158 .sync_super = super_1_sync, 2159 .rdev_size_change = super_1_rdev_size_change, 2160 .allow_new_offset = super_1_allow_new_offset, 2161 }, 2162 }; 2163 2164 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2165 { 2166 if (mddev->sync_super) { 2167 mddev->sync_super(mddev, rdev); 2168 return; 2169 } 2170 2171 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2172 2173 super_types[mddev->major_version].sync_super(mddev, rdev); 2174 } 2175 2176 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2177 { 2178 struct md_rdev *rdev, *rdev2; 2179 2180 rcu_read_lock(); 2181 rdev_for_each_rcu(rdev, mddev1) { 2182 if (test_bit(Faulty, &rdev->flags) || 2183 test_bit(Journal, &rdev->flags) || 2184 rdev->raid_disk == -1) 2185 continue; 2186 rdev_for_each_rcu(rdev2, mddev2) { 2187 if (test_bit(Faulty, &rdev2->flags) || 2188 test_bit(Journal, &rdev2->flags) || 2189 rdev2->raid_disk == -1) 2190 continue; 2191 if (rdev->bdev->bd_contains == 2192 rdev2->bdev->bd_contains) { 2193 rcu_read_unlock(); 2194 return 1; 2195 } 2196 } 2197 } 2198 rcu_read_unlock(); 2199 return 0; 2200 } 2201 2202 static LIST_HEAD(pending_raid_disks); 2203 2204 /* 2205 * Try to register data integrity profile for an mddev 2206 * 2207 * This is called when an array is started and after a disk has been kicked 2208 * from the array. It only succeeds if all working and active component devices 2209 * are integrity capable with matching profiles. 
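 *
 * A minimal usage sketch (illustrative only; example_run() is a made-up
 * personality ->run() method, the usual place this helper is called from):
 *
 *	static int example_run(struct mddev *mddev)
 *	{
 *		...
 *		return md_integrity_register(mddev);
 *	}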
2210 */ 2211 int md_integrity_register(struct mddev *mddev) 2212 { 2213 struct md_rdev *rdev, *reference = NULL; 2214 2215 if (list_empty(&mddev->disks)) 2216 return 0; /* nothing to do */ 2217 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 2218 return 0; /* shouldn't register, or already is */ 2219 rdev_for_each(rdev, mddev) { 2220 /* skip spares and non-functional disks */ 2221 if (test_bit(Faulty, &rdev->flags)) 2222 continue; 2223 if (rdev->raid_disk < 0) 2224 continue; 2225 if (!reference) { 2226 /* Use the first rdev as the reference */ 2227 reference = rdev; 2228 continue; 2229 } 2230 /* does this rdev's profile match the reference profile? */ 2231 if (blk_integrity_compare(reference->bdev->bd_disk, 2232 rdev->bdev->bd_disk) < 0) 2233 return -EINVAL; 2234 } 2235 if (!reference || !bdev_get_integrity(reference->bdev)) 2236 return 0; 2237 /* 2238 * All component devices are integrity capable and have matching 2239 * profiles, register the common profile for the md device. 2240 */ 2241 blk_integrity_register(mddev->gendisk, 2242 bdev_get_integrity(reference->bdev)); 2243 2244 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2245 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { 2246 pr_err("md: failed to create integrity pool for %s\n", 2247 mdname(mddev)); 2248 return -EINVAL; 2249 } 2250 return 0; 2251 } 2252 EXPORT_SYMBOL(md_integrity_register); 2253 2254 /* 2255 * Attempt to add an rdev, but only if it is consistent with the current 2256 * integrity profile 2257 */ 2258 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2259 { 2260 struct blk_integrity *bi_mddev; 2261 char name[BDEVNAME_SIZE]; 2262 2263 if (!mddev->gendisk) 2264 return 0; 2265 2266 bi_mddev = blk_get_integrity(mddev->gendisk); 2267 2268 if (!bi_mddev) /* nothing to do */ 2269 return 0; 2270 2271 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2272 pr_err("%s: incompatible integrity profile for %s\n", 2273 mdname(mddev), bdevname(rdev->bdev, name)); 2274 return -ENXIO; 2275 } 2276 2277 return 0; 2278 } 2279 EXPORT_SYMBOL(md_integrity_add_rdev); 2280 2281 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2282 { 2283 char b[BDEVNAME_SIZE]; 2284 struct kobject *ko; 2285 int err; 2286 2287 /* prevent duplicates */ 2288 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2289 return -EEXIST; 2290 2291 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) && 2292 mddev->pers) 2293 return -EROFS; 2294 2295 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2296 if (!test_bit(Journal, &rdev->flags) && 2297 rdev->sectors && 2298 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2299 if (mddev->pers) { 2300 /* Cannot change size, so fail 2301 * If mddev->level <= 0, then we don't care 2302 * about aligning sizes (e.g. linear) 2303 */ 2304 if (mddev->level > 0) 2305 return -ENOSPC; 2306 } else 2307 mddev->dev_sectors = rdev->sectors; 2308 } 2309 2310 /* Verify rdev->desc_nr is unique. 
2311 * If it is -1, assign a free number, else 2312 * check number is not in use 2313 */ 2314 rcu_read_lock(); 2315 if (rdev->desc_nr < 0) { 2316 int choice = 0; 2317 if (mddev->pers) 2318 choice = mddev->raid_disks; 2319 while (md_find_rdev_nr_rcu(mddev, choice)) 2320 choice++; 2321 rdev->desc_nr = choice; 2322 } else { 2323 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2324 rcu_read_unlock(); 2325 return -EBUSY; 2326 } 2327 } 2328 rcu_read_unlock(); 2329 if (!test_bit(Journal, &rdev->flags) && 2330 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2331 pr_warn("md: %s: array is limited to %d devices\n", 2332 mdname(mddev), mddev->max_disks); 2333 return -EBUSY; 2334 } 2335 bdevname(rdev->bdev,b); 2336 strreplace(b, '/', '!'); 2337 2338 rdev->mddev = mddev; 2339 pr_debug("md: bind<%s>\n", b); 2340 2341 if (mddev->raid_disks) 2342 mddev_create_serial_pool(mddev, rdev, false); 2343 2344 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2345 goto fail; 2346 2347 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2348 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2349 /* failure here is OK */; 2350 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2351 2352 list_add_rcu(&rdev->same_set, &mddev->disks); 2353 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2354 2355 /* May as well allow recovery to be retried once */ 2356 mddev->recovery_disabled++; 2357 2358 return 0; 2359 2360 fail: 2361 pr_warn("md: failed to register dev-%s for %s\n", 2362 b, mdname(mddev)); 2363 return err; 2364 } 2365 2366 static void md_delayed_delete(struct work_struct *ws) 2367 { 2368 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2369 kobject_del(&rdev->kobj); 2370 kobject_put(&rdev->kobj); 2371 } 2372 2373 static void unbind_rdev_from_array(struct md_rdev *rdev) 2374 { 2375 char b[BDEVNAME_SIZE]; 2376 2377 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2378 list_del_rcu(&rdev->same_set); 2379 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2380 mddev_destroy_serial_pool(rdev->mddev, rdev); 2381 rdev->mddev = NULL; 2382 sysfs_remove_link(&rdev->kobj, "block"); 2383 sysfs_put(rdev->sysfs_state); 2384 rdev->sysfs_state = NULL; 2385 rdev->badblocks.count = 0; 2386 /* We need to delay this, otherwise we can deadlock when 2387 * writing to 'remove' to "dev/state". We also need 2388 * to delay it due to rcu usage. 2389 */ 2390 synchronize_rcu(); 2391 INIT_WORK(&rdev->del_work, md_delayed_delete); 2392 kobject_get(&rdev->kobj); 2393 queue_work(md_misc_wq, &rdev->del_work); 2394 } 2395 2396 /* 2397 * prevent the device from being mounted, repartitioned or 2398 * otherwise reused by a RAID array (or any other kernel 2399 * subsystem), by bd_claiming the device. 2400 */ 2401 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2402 { 2403 int err = 0; 2404 struct block_device *bdev; 2405 char b[BDEVNAME_SIZE]; 2406 2407 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2408 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2409 if (IS_ERR(bdev)) { 2410 pr_warn("md: could not open %s.\n", __bdevname(dev, b)); 2411 return PTR_ERR(bdev); 2412 } 2413 rdev->bdev = bdev; 2414 return err; 2415 } 2416 2417 static void unlock_rdev(struct md_rdev *rdev) 2418 { 2419 struct block_device *bdev = rdev->bdev; 2420 rdev->bdev = NULL; 2421 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2422 } 2423 2424 void md_autodetect_dev(dev_t dev); 2425 2426 static void export_rdev(struct md_rdev *rdev) 2427 { 2428 char b[BDEVNAME_SIZE]; 2429 2430 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b)); 2431 md_rdev_clear(rdev); 2432 #ifndef MODULE 2433 if (test_bit(AutoDetected, &rdev->flags)) 2434 md_autodetect_dev(rdev->bdev->bd_dev); 2435 #endif 2436 unlock_rdev(rdev); 2437 kobject_put(&rdev->kobj); 2438 } 2439 2440 void md_kick_rdev_from_array(struct md_rdev *rdev) 2441 { 2442 unbind_rdev_from_array(rdev); 2443 export_rdev(rdev); 2444 } 2445 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2446 2447 static void export_array(struct mddev *mddev) 2448 { 2449 struct md_rdev *rdev; 2450 2451 while (!list_empty(&mddev->disks)) { 2452 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2453 same_set); 2454 md_kick_rdev_from_array(rdev); 2455 } 2456 mddev->raid_disks = 0; 2457 mddev->major_version = 0; 2458 } 2459 2460 static bool set_in_sync(struct mddev *mddev) 2461 { 2462 lockdep_assert_held(&mddev->lock); 2463 if (!mddev->in_sync) { 2464 mddev->sync_checkers++; 2465 spin_unlock(&mddev->lock); 2466 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2467 spin_lock(&mddev->lock); 2468 if (!mddev->in_sync && 2469 percpu_ref_is_zero(&mddev->writes_pending)) { 2470 mddev->in_sync = 1; 2471 /* 2472 * Ensure ->in_sync is visible before we clear 2473 * ->sync_checkers. 2474 */ 2475 smp_mb(); 2476 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2477 sysfs_notify_dirent_safe(mddev->sysfs_state); 2478 } 2479 if (--mddev->sync_checkers == 0) 2480 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2481 } 2482 if (mddev->safemode == 1) 2483 mddev->safemode = 0; 2484 return mddev->in_sync; 2485 } 2486 2487 static void sync_sbs(struct mddev *mddev, int nospares) 2488 { 2489 /* Update each superblock (in-memory image), but 2490 * if we are allowed to, skip spares which already 2491 * have the right event counter, or have one earlier 2492 * (which would mean they aren't being marked as dirty 2493 * with the rest of the array) 2494 */ 2495 struct md_rdev *rdev; 2496 rdev_for_each(rdev, mddev) { 2497 if (rdev->sb_events == mddev->events || 2498 (nospares && 2499 rdev->raid_disk < 0 && 2500 rdev->sb_events+1 == mddev->events)) { 2501 /* Don't update this superblock */ 2502 rdev->sb_loaded = 2; 2503 } else { 2504 sync_super(mddev, rdev); 2505 rdev->sb_loaded = 1; 2506 } 2507 } 2508 } 2509 2510 static bool does_sb_need_changing(struct mddev *mddev) 2511 { 2512 struct md_rdev *rdev; 2513 struct mdp_superblock_1 *sb; 2514 int role; 2515 2516 /* Find a good rdev */ 2517 rdev_for_each(rdev, mddev) 2518 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) 2519 break; 2520 2521 /* No good device found. */ 2522 if (!rdev) 2523 return false; 2524 2525 sb = page_address(rdev->sb_page); 2526 /* Check if a device has become faulty or a spare become active */ 2527 rdev_for_each(rdev, mddev) { 2528 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2529 /* Device activated? */ 2530 if (role == 0xffff && rdev->raid_disk >=0 && 2531 !test_bit(Faulty, &rdev->flags)) 2532 return true; 2533 /* Device turned faulty? 
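		 * (role values of 0xfffd and above are the special
		 * JOURNAL/FAULTY/SPARE markers, so a role below 0xfffd means
		 * the superblock still records a real slot for this device)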
		 */
		if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
			return true;
	}

	/* Check if any mddev parameters have changed */
	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
	    (mddev->layout != le32_to_cpu(sb->layout)) ||
	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
		return true;

	return false;
}

void md_update_sb(struct mddev *mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;
	int ret = -1;

	if (mddev->ro) {
		if (force_change)
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		return;
	}

repeat:
	if (mddev_is_clustered(mddev)) {
		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
			force_change = 1;
		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
			nospares = 1;
		ret = md_cluster_ops->metadata_update_start(mddev);
		/* Has someone else updated the sb? */
		if (!does_sb_need_changing(mddev)) {
			if (ret == 0)
				md_cluster_ops->metadata_update_cancel(mddev);
			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
					 BIT(MD_SB_CHANGE_DEVS) |
					 BIT(MD_SB_CHANGE_CLEAN));
			return;
		}
	}

	/*
	 * First make sure individual recovery_offsets are correct;
	 * curr_resync_completed can only be used during recovery.
	 * During reshape/resync it might use array-addresses rather
	 * than device addresses.
	 */
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    !test_bit(Journal, &rdev->flags) &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;

	}
	if (!mddev->persistent) {
		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		if (!mddev->external) {
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			rdev_for_each(rdev, mddev) {
				if (rdev->badblocks.changed) {
					rdev->badblocks.changed = 0;
					ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock(&mddev->lock);

	mddev->utime = ktime_get_real_seconds();

	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
		force_change = 1;
	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
2639 * Pointless because if there are any spares to skip, 2640 * then a recovery will happen and soon that array won't 2641 * be degraded any more and the spare can go back to sleep then. 2642 */ 2643 nospares = 0; 2644 2645 sync_req = mddev->in_sync; 2646 2647 /* If this is just a dirty<->clean transition, and the array is clean 2648 * and 'events' is odd, we can roll back to the previous clean state */ 2649 if (nospares 2650 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2651 && mddev->can_decrease_events 2652 && mddev->events != 1) { 2653 mddev->events--; 2654 mddev->can_decrease_events = 0; 2655 } else { 2656 /* otherwise we have to go forward and ... */ 2657 mddev->events ++; 2658 mddev->can_decrease_events = nospares; 2659 } 2660 2661 /* 2662 * This 64-bit counter should never wrap. 2663 * Either we are in around ~1 trillion A.C., assuming 2664 * 1 reboot per second, or we have a bug... 2665 */ 2666 WARN_ON(mddev->events == 0); 2667 2668 rdev_for_each(rdev, mddev) { 2669 if (rdev->badblocks.changed) 2670 any_badblocks_changed++; 2671 if (test_bit(Faulty, &rdev->flags)) 2672 set_bit(FaultRecorded, &rdev->flags); 2673 } 2674 2675 sync_sbs(mddev, nospares); 2676 spin_unlock(&mddev->lock); 2677 2678 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2679 mdname(mddev), mddev->in_sync); 2680 2681 if (mddev->queue) 2682 blk_add_trace_msg(mddev->queue, "md md_update_sb"); 2683 rewrite: 2684 md_bitmap_update_sb(mddev->bitmap); 2685 rdev_for_each(rdev, mddev) { 2686 char b[BDEVNAME_SIZE]; 2687 2688 if (rdev->sb_loaded != 1) 2689 continue; /* no noise on spare devices */ 2690 2691 if (!test_bit(Faulty, &rdev->flags)) { 2692 md_super_write(mddev,rdev, 2693 rdev->sb_start, rdev->sb_size, 2694 rdev->sb_page); 2695 pr_debug("md: (write) %s's sb offset: %llu\n", 2696 bdevname(rdev->bdev, b), 2697 (unsigned long long)rdev->sb_start); 2698 rdev->sb_events = mddev->events; 2699 if (rdev->badblocks.size) { 2700 md_super_write(mddev, rdev, 2701 rdev->badblocks.sector, 2702 rdev->badblocks.size << 9, 2703 rdev->bb_page); 2704 rdev->badblocks.size = 0; 2705 } 2706 2707 } else 2708 pr_debug("md: %s (skipping faulty)\n", 2709 bdevname(rdev->bdev, b)); 2710 2711 if (mddev->level == LEVEL_MULTIPATH) 2712 /* only need to write one superblock... 
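			 * (for MULTIPATH every rdev is a path to the same
			 * underlying device, so one successful write is
			 * expected to cover them all)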
			 */
			break;
	}
	if (md_super_wait(mddev) < 0)
		goto rewrite;
	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */

	if (mddev_is_clustered(mddev) && ret == 0)
		md_cluster_ops->metadata_update_finish(mddev);

	if (mddev->in_sync != sync_req ||
	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
		/* have to write it out again */
		goto repeat;
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");

	rdev_for_each(rdev, mddev) {
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
			ack_all_badblocks(&rdev->badblocks);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}
EXPORT_SYMBOL(md_update_sb);

static int add_bound_rdev(struct md_rdev *rdev)
{
	struct mddev *mddev = rdev->mddev;
	int err = 0;
	bool add_journal = test_bit(Journal, &rdev->flags);

	if (!mddev->pers->hot_remove_disk || add_journal) {
		/* If there is hot_add_disk but no hot_remove_disk
		 * then added disks are for geometry changes,
		 * and should be added immediately.
		 */
		super_types[mddev->major_version].
			validate_super(mddev, rdev);
		if (add_journal)
			mddev_suspend(mddev);
		err = mddev->pers->hot_add_disk(mddev, rdev);
		if (add_journal)
			mddev_resume(mddev);
		if (err) {
			md_kick_rdev_from_array(rdev);
			return err;
		}
	}
	sysfs_notify_dirent_safe(rdev->sysfs_state);

	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_new_event(mddev);
	md_wakeup_thread(mddev->thread);
	return 0;
}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.
They must either be the same, or cmd can 2784 * have a trailing newline 2785 */ 2786 while (*cmd && *str && *cmd == *str) { 2787 cmd++; 2788 str++; 2789 } 2790 if (*cmd == '\n') 2791 cmd++; 2792 if (*str || *cmd) 2793 return 0; 2794 return 1; 2795 } 2796 2797 struct rdev_sysfs_entry { 2798 struct attribute attr; 2799 ssize_t (*show)(struct md_rdev *, char *); 2800 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2801 }; 2802 2803 static ssize_t 2804 state_show(struct md_rdev *rdev, char *page) 2805 { 2806 char *sep = ","; 2807 size_t len = 0; 2808 unsigned long flags = READ_ONCE(rdev->flags); 2809 2810 if (test_bit(Faulty, &flags) || 2811 (!test_bit(ExternalBbl, &flags) && 2812 rdev->badblocks.unacked_exist)) 2813 len += sprintf(page+len, "faulty%s", sep); 2814 if (test_bit(In_sync, &flags)) 2815 len += sprintf(page+len, "in_sync%s", sep); 2816 if (test_bit(Journal, &flags)) 2817 len += sprintf(page+len, "journal%s", sep); 2818 if (test_bit(WriteMostly, &flags)) 2819 len += sprintf(page+len, "write_mostly%s", sep); 2820 if (test_bit(Blocked, &flags) || 2821 (rdev->badblocks.unacked_exist 2822 && !test_bit(Faulty, &flags))) 2823 len += sprintf(page+len, "blocked%s", sep); 2824 if (!test_bit(Faulty, &flags) && 2825 !test_bit(Journal, &flags) && 2826 !test_bit(In_sync, &flags)) 2827 len += sprintf(page+len, "spare%s", sep); 2828 if (test_bit(WriteErrorSeen, &flags)) 2829 len += sprintf(page+len, "write_error%s", sep); 2830 if (test_bit(WantReplacement, &flags)) 2831 len += sprintf(page+len, "want_replacement%s", sep); 2832 if (test_bit(Replacement, &flags)) 2833 len += sprintf(page+len, "replacement%s", sep); 2834 if (test_bit(ExternalBbl, &flags)) 2835 len += sprintf(page+len, "external_bbl%s", sep); 2836 if (test_bit(FailFast, &flags)) 2837 len += sprintf(page+len, "failfast%s", sep); 2838 2839 if (len) 2840 len -= strlen(sep); 2841 2842 return len+sprintf(page+len, "\n"); 2843 } 2844 2845 static ssize_t 2846 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2847 { 2848 /* can write 2849 * faulty - simulates an error 2850 * remove - disconnects the device 2851 * writemostly - sets write_mostly 2852 * -writemostly - clears write_mostly 2853 * blocked - sets the Blocked flags 2854 * -blocked - clears the Blocked and possibly simulates an error 2855 * insync - sets Insync providing device isn't active 2856 * -insync - clear Insync for a device with a slot assigned, 2857 * so that it gets rebuilt based on bitmap 2858 * write_error - sets WriteErrorSeen 2859 * -write_error - clears WriteErrorSeen 2860 * {,-}failfast - set/clear FailFast 2861 */ 2862 int err = -EINVAL; 2863 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2864 md_error(rdev->mddev, rdev); 2865 if (test_bit(Faulty, &rdev->flags)) 2866 err = 0; 2867 else 2868 err = -EBUSY; 2869 } else if (cmd_match(buf, "remove")) { 2870 if (rdev->mddev->pers) { 2871 clear_bit(Blocked, &rdev->flags); 2872 remove_and_add_spares(rdev->mddev, rdev); 2873 } 2874 if (rdev->raid_disk >= 0) 2875 err = -EBUSY; 2876 else { 2877 struct mddev *mddev = rdev->mddev; 2878 err = 0; 2879 if (mddev_is_clustered(mddev)) 2880 err = md_cluster_ops->remove_disk(mddev, rdev); 2881 2882 if (err == 0) { 2883 md_kick_rdev_from_array(rdev); 2884 if (mddev->pers) { 2885 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2886 md_wakeup_thread(mddev->thread); 2887 } 2888 md_new_event(mddev); 2889 } 2890 } 2891 } else if (cmd_match(buf, "writemostly")) { 2892 set_bit(WriteMostly, &rdev->flags); 2893 mddev_create_serial_pool(rdev->mddev, rdev, false); 2894 
err = 0; 2895 } else if (cmd_match(buf, "-writemostly")) { 2896 mddev_destroy_serial_pool(rdev->mddev, rdev); 2897 clear_bit(WriteMostly, &rdev->flags); 2898 err = 0; 2899 } else if (cmd_match(buf, "blocked")) { 2900 set_bit(Blocked, &rdev->flags); 2901 err = 0; 2902 } else if (cmd_match(buf, "-blocked")) { 2903 if (!test_bit(Faulty, &rdev->flags) && 2904 !test_bit(ExternalBbl, &rdev->flags) && 2905 rdev->badblocks.unacked_exist) { 2906 /* metadata handler doesn't understand badblocks, 2907 * so we need to fail the device 2908 */ 2909 md_error(rdev->mddev, rdev); 2910 } 2911 clear_bit(Blocked, &rdev->flags); 2912 clear_bit(BlockedBadBlocks, &rdev->flags); 2913 wake_up(&rdev->blocked_wait); 2914 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2915 md_wakeup_thread(rdev->mddev->thread); 2916 2917 err = 0; 2918 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2919 set_bit(In_sync, &rdev->flags); 2920 err = 0; 2921 } else if (cmd_match(buf, "failfast")) { 2922 set_bit(FailFast, &rdev->flags); 2923 err = 0; 2924 } else if (cmd_match(buf, "-failfast")) { 2925 clear_bit(FailFast, &rdev->flags); 2926 err = 0; 2927 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 2928 !test_bit(Journal, &rdev->flags)) { 2929 if (rdev->mddev->pers == NULL) { 2930 clear_bit(In_sync, &rdev->flags); 2931 rdev->saved_raid_disk = rdev->raid_disk; 2932 rdev->raid_disk = -1; 2933 err = 0; 2934 } 2935 } else if (cmd_match(buf, "write_error")) { 2936 set_bit(WriteErrorSeen, &rdev->flags); 2937 err = 0; 2938 } else if (cmd_match(buf, "-write_error")) { 2939 clear_bit(WriteErrorSeen, &rdev->flags); 2940 err = 0; 2941 } else if (cmd_match(buf, "want_replacement")) { 2942 /* Any non-spare device that is not a replacement can 2943 * become want_replacement at any time, but we then need to 2944 * check if recovery is needed. 2945 */ 2946 if (rdev->raid_disk >= 0 && 2947 !test_bit(Journal, &rdev->flags) && 2948 !test_bit(Replacement, &rdev->flags)) 2949 set_bit(WantReplacement, &rdev->flags); 2950 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2951 md_wakeup_thread(rdev->mddev->thread); 2952 err = 0; 2953 } else if (cmd_match(buf, "-want_replacement")) { 2954 /* Clearing 'want_replacement' is always allowed. 2955 * Once replacements starts it is too late though. 2956 */ 2957 err = 0; 2958 clear_bit(WantReplacement, &rdev->flags); 2959 } else if (cmd_match(buf, "replacement")) { 2960 /* Can only set a device as a replacement when array has not 2961 * yet been started. Once running, replacement is automatic 2962 * from spares, or by assigning 'slot'. 2963 */ 2964 if (rdev->mddev->pers) 2965 err = -EBUSY; 2966 else { 2967 set_bit(Replacement, &rdev->flags); 2968 err = 0; 2969 } 2970 } else if (cmd_match(buf, "-replacement")) { 2971 /* Similarly, can only clear Replacement before start */ 2972 if (rdev->mddev->pers) 2973 err = -EBUSY; 2974 else { 2975 clear_bit(Replacement, &rdev->flags); 2976 err = 0; 2977 } 2978 } else if (cmd_match(buf, "re-add")) { 2979 if (!rdev->mddev->pers) 2980 err = -EINVAL; 2981 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 2982 rdev->saved_raid_disk >= 0) { 2983 /* clear_bit is performed _after_ all the devices 2984 * have their local Faulty bit cleared. 
If any writes 2985 * happen in the meantime in the local node, they 2986 * will land in the local bitmap, which will be synced 2987 * by this node eventually 2988 */ 2989 if (!mddev_is_clustered(rdev->mddev) || 2990 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 2991 clear_bit(Faulty, &rdev->flags); 2992 err = add_bound_rdev(rdev); 2993 } 2994 } else 2995 err = -EBUSY; 2996 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 2997 set_bit(ExternalBbl, &rdev->flags); 2998 rdev->badblocks.shift = 0; 2999 err = 0; 3000 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 3001 clear_bit(ExternalBbl, &rdev->flags); 3002 err = 0; 3003 } 3004 if (!err) 3005 sysfs_notify_dirent_safe(rdev->sysfs_state); 3006 return err ? err : len; 3007 } 3008 static struct rdev_sysfs_entry rdev_state = 3009 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 3010 3011 static ssize_t 3012 errors_show(struct md_rdev *rdev, char *page) 3013 { 3014 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 3015 } 3016 3017 static ssize_t 3018 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 3019 { 3020 unsigned int n; 3021 int rv; 3022 3023 rv = kstrtouint(buf, 10, &n); 3024 if (rv < 0) 3025 return rv; 3026 atomic_set(&rdev->corrected_errors, n); 3027 return len; 3028 } 3029 static struct rdev_sysfs_entry rdev_errors = 3030 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 3031 3032 static ssize_t 3033 slot_show(struct md_rdev *rdev, char *page) 3034 { 3035 if (test_bit(Journal, &rdev->flags)) 3036 return sprintf(page, "journal\n"); 3037 else if (rdev->raid_disk < 0) 3038 return sprintf(page, "none\n"); 3039 else 3040 return sprintf(page, "%d\n", rdev->raid_disk); 3041 } 3042 3043 static ssize_t 3044 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 3045 { 3046 int slot; 3047 int err; 3048 3049 if (test_bit(Journal, &rdev->flags)) 3050 return -EBUSY; 3051 if (strncmp(buf, "none", 4)==0) 3052 slot = -1; 3053 else { 3054 err = kstrtouint(buf, 10, (unsigned int *)&slot); 3055 if (err < 0) 3056 return err; 3057 } 3058 if (rdev->mddev->pers && slot == -1) { 3059 /* Setting 'slot' on an active array requires also 3060 * updating the 'rd%d' link, and communicating 3061 * with the personality with ->hot_*_disk. 3062 * For now we only support removing 3063 * failed/spare devices. This normally happens automatically, 3064 * but not when the metadata is externally managed. 3065 */ 3066 if (rdev->raid_disk == -1) 3067 return -EEXIST; 3068 /* personality does all needed checks */ 3069 if (rdev->mddev->pers->hot_remove_disk == NULL) 3070 return -EINVAL; 3071 clear_bit(Blocked, &rdev->flags); 3072 remove_and_add_spares(rdev->mddev, rdev); 3073 if (rdev->raid_disk >= 0) 3074 return -EBUSY; 3075 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3076 md_wakeup_thread(rdev->mddev->thread); 3077 } else if (rdev->mddev->pers) { 3078 /* Activating a spare .. or possibly reactivating 3079 * if we ever get bitmaps working here. 
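		 * The sequence below: validate the request, claim the slot,
		 * clear In_sync, then hand the device to the personality via
		 * ->hot_add_disk().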
3080 */ 3081 int err; 3082 3083 if (rdev->raid_disk != -1) 3084 return -EBUSY; 3085 3086 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 3087 return -EBUSY; 3088 3089 if (rdev->mddev->pers->hot_add_disk == NULL) 3090 return -EINVAL; 3091 3092 if (slot >= rdev->mddev->raid_disks && 3093 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3094 return -ENOSPC; 3095 3096 rdev->raid_disk = slot; 3097 if (test_bit(In_sync, &rdev->flags)) 3098 rdev->saved_raid_disk = slot; 3099 else 3100 rdev->saved_raid_disk = -1; 3101 clear_bit(In_sync, &rdev->flags); 3102 clear_bit(Bitmap_sync, &rdev->flags); 3103 err = rdev->mddev->pers-> 3104 hot_add_disk(rdev->mddev, rdev); 3105 if (err) { 3106 rdev->raid_disk = -1; 3107 return err; 3108 } else 3109 sysfs_notify_dirent_safe(rdev->sysfs_state); 3110 if (sysfs_link_rdev(rdev->mddev, rdev)) 3111 /* failure here is OK */; 3112 /* don't wakeup anyone, leave that to userspace. */ 3113 } else { 3114 if (slot >= rdev->mddev->raid_disks && 3115 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3116 return -ENOSPC; 3117 rdev->raid_disk = slot; 3118 /* assume it is working */ 3119 clear_bit(Faulty, &rdev->flags); 3120 clear_bit(WriteMostly, &rdev->flags); 3121 set_bit(In_sync, &rdev->flags); 3122 sysfs_notify_dirent_safe(rdev->sysfs_state); 3123 } 3124 return len; 3125 } 3126 3127 static struct rdev_sysfs_entry rdev_slot = 3128 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3129 3130 static ssize_t 3131 offset_show(struct md_rdev *rdev, char *page) 3132 { 3133 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3134 } 3135 3136 static ssize_t 3137 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3138 { 3139 unsigned long long offset; 3140 if (kstrtoull(buf, 10, &offset) < 0) 3141 return -EINVAL; 3142 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3143 return -EBUSY; 3144 if (rdev->sectors && rdev->mddev->external) 3145 /* Must set offset before size, so overlap checks 3146 * can be sane */ 3147 return -EBUSY; 3148 rdev->data_offset = offset; 3149 rdev->new_data_offset = offset; 3150 return len; 3151 } 3152 3153 static struct rdev_sysfs_entry rdev_offset = 3154 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3155 3156 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3157 { 3158 return sprintf(page, "%llu\n", 3159 (unsigned long long)rdev->new_data_offset); 3160 } 3161 3162 static ssize_t new_offset_store(struct md_rdev *rdev, 3163 const char *buf, size_t len) 3164 { 3165 unsigned long long new_offset; 3166 struct mddev *mddev = rdev->mddev; 3167 3168 if (kstrtoull(buf, 10, &new_offset) < 0) 3169 return -EINVAL; 3170 3171 if (mddev->sync_thread || 3172 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 3173 return -EBUSY; 3174 if (new_offset == rdev->data_offset) 3175 /* reset is always permitted */ 3176 ; 3177 else if (new_offset > rdev->data_offset) { 3178 /* must not push array size beyond rdev_sectors */ 3179 if (new_offset - rdev->data_offset 3180 + mddev->dev_sectors > rdev->sectors) 3181 return -E2BIG; 3182 } 3183 /* Metadata worries about other space details. */ 3184 3185 /* decreasing the offset is inconsistent with a backwards 3186 * reshape. 3187 */ 3188 if (new_offset < rdev->data_offset && 3189 mddev->reshape_backwards) 3190 return -EINVAL; 3191 /* Increasing offset is inconsistent with forwards 3192 * reshape. reshape_direction should be set to 3193 * 'backwards' first. 
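	 * A worked example: with data_offset at 2048 sectors, accepting a
	 * new_offset of 4096 would move the data towards the end of the
	 * device, which is only consistent with reshape_backwards being set.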
	 */
	if (new_offset > rdev->data_offset &&
	    !mddev->reshape_backwards)
		return -EINVAL;

	if (mddev->pers && mddev->persistent &&
	    !super_types[mddev->major_version]
	    .allow_new_offset(rdev, new_offset))
		return -E2BIG;
	rdev->new_data_offset = new_offset;
	if (new_offset > rdev->data_offset)
		mddev->reshape_backwards = 1;
	else if (new_offset < rdev->data_offset)
		mddev->reshape_backwards = 0;

	return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);

static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (kstrtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}

static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	struct mddev *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (test_bit(Journal, &rdev->flags))
		return -EBUSY;
	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (rdev->data_offset != rdev->new_data_offset)
		return -EINVAL; /* too confusing */
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
				rdev->data_offset;
		if (!my_mddev->pers->resize)
			/* Cannot change size for RAID0 or Linear etc */
			return -EINVAL;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* Need to check that all other rdevs with the same
		 * ->bdev do not overlap.  'rcu' is sufficient to walk
		 * the rdev lists safely.
		 * This check does not provide a hard guarantee, it
		 * just helps avoid dangerous mistakes.
		 */
		struct mddev *mddev;
		int overlap = 0;
		struct list_head *tmp;

		rcu_read_lock();
		for_each_mddev(mddev, tmp) {
			struct md_rdev *rdev2;

			rdev_for_each(rdev2, mddev)
				if (rdev->bdev == rdev2->bdev &&
				    rdev != rdev2 &&
				    overlaps(rdev->data_offset, rdev->sectors,
					     rdev2->data_offset,
					     rdev2->sectors)) {
					overlap = 1;
					break;
				}
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		rcu_read_unlock();
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);

static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
	unsigned long long recovery_start = rdev->recovery_offset;

	if (test_bit(In_sync, &rdev->flags) ||
	    recovery_start == MaxSector)
		return sprintf(page, "none\n");

	return sprintf(page, "%llu\n", recovery_start);
}

static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long recovery_start;

	if (cmd_match(buf, "none"))
		recovery_start = MaxSector;
	else if (kstrtoull(buf, 10, &recovery_start))
		return -EINVAL;

	if (rdev->mddev->pers &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	rdev->recovery_offset = recovery_start;
	if (recovery_start == MaxSector)
		set_bit(In_sync, &rdev->flags);
	else
		clear_bit(In_sync, &rdev->flags);
	return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
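/* Example usage from userspace (illustrative; the device name is made up):
 *
 *	# echo "2186488 8" > /sys/block/md0/md/dev-sda/bad_blocks
 *	# cat /sys/block/md0/md/dev-sda/bad_blocks
 *	2186488 8
 *
 * The write records an acknowledged 8-sector bad range starting at
 * sector 2186488 on that member device; the read lists it back.
 */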
static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
	/* Maybe that ack was all we needed */
	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
		wake_up(&rdev->blocked_wait);
	return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);

static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);

static ssize_t
ppl_sector_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
}

static ssize_t
ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long sector;

	if (kstrtoull(buf, 10, &sector) < 0)
		return -EINVAL;
	if (sector != (sector_t)sector)
		return -EINVAL;

	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	if (rdev->mddev->persistent) {
		if (rdev->mddev->major_version == 0)
			return -EINVAL;
		if ((sector > rdev->sb_start &&
		     sector - rdev->sb_start > S16_MAX) ||
		    (sector < rdev->sb_start &&
		     rdev->sb_start - sector > -S16_MIN))
			return -EINVAL;
		rdev->ppl.offset = sector - rdev->sb_start;
	} else if (!rdev->mddev->external) {
		return -EBUSY;
	}
	rdev->ppl.sector = sector;
	return len;
}

static struct rdev_sysfs_entry rdev_ppl_sector =
__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);

static ssize_t
ppl_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%u\n", rdev->ppl.size);
}

static ssize_t
ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned int size;

	if (kstrtouint(buf, 10, &size) < 0)
		return -EINVAL;

	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	if (rdev->mddev->persistent) {
		if (rdev->mddev->major_version == 0)
			return -EINVAL;
		if (size > U16_MAX)
			return -EINVAL;
	} else if (!rdev->mddev->external) {
		return -EBUSY;
	}
	rdev->ppl.size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_ppl_size =
__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);

static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_new_offset.attr,
	&rdev_size.attr,
	&rdev_recovery_start.attr,
	&rdev_bad_blocks.attr,
	&rdev_unack_bad_blocks.attr,
	&rdev_ppl_sector.attr,
	&rdev_ppl_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct
rdev_sysfs_entry, attr); 3488 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3489 3490 if (!entry->show) 3491 return -EIO; 3492 if (!rdev->mddev) 3493 return -ENODEV; 3494 return entry->show(rdev, page); 3495 } 3496 3497 static ssize_t 3498 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3499 const char *page, size_t length) 3500 { 3501 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3502 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3503 ssize_t rv; 3504 struct mddev *mddev = rdev->mddev; 3505 3506 if (!entry->store) 3507 return -EIO; 3508 if (!capable(CAP_SYS_ADMIN)) 3509 return -EACCES; 3510 rv = mddev ? mddev_lock(mddev) : -ENODEV; 3511 if (!rv) { 3512 if (rdev->mddev == NULL) 3513 rv = -ENODEV; 3514 else 3515 rv = entry->store(rdev, page, length); 3516 mddev_unlock(mddev); 3517 } 3518 return rv; 3519 } 3520 3521 static void rdev_free(struct kobject *ko) 3522 { 3523 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3524 kfree(rdev); 3525 } 3526 static const struct sysfs_ops rdev_sysfs_ops = { 3527 .show = rdev_attr_show, 3528 .store = rdev_attr_store, 3529 }; 3530 static struct kobj_type rdev_ktype = { 3531 .release = rdev_free, 3532 .sysfs_ops = &rdev_sysfs_ops, 3533 .default_attrs = rdev_default_attrs, 3534 }; 3535 3536 int md_rdev_init(struct md_rdev *rdev) 3537 { 3538 rdev->desc_nr = -1; 3539 rdev->saved_raid_disk = -1; 3540 rdev->raid_disk = -1; 3541 rdev->flags = 0; 3542 rdev->data_offset = 0; 3543 rdev->new_data_offset = 0; 3544 rdev->sb_events = 0; 3545 rdev->last_read_error = 0; 3546 rdev->sb_loaded = 0; 3547 rdev->bb_page = NULL; 3548 atomic_set(&rdev->nr_pending, 0); 3549 atomic_set(&rdev->read_errors, 0); 3550 atomic_set(&rdev->corrected_errors, 0); 3551 3552 INIT_LIST_HEAD(&rdev->same_set); 3553 init_waitqueue_head(&rdev->blocked_wait); 3554 3555 /* Add space to store bad block list. 3556 * This reserves the space even on arrays where it cannot 3557 * be used - I wonder if that matters 3558 */ 3559 return badblocks_init(&rdev->badblocks, 0); 3560 } 3561 EXPORT_SYMBOL_GPL(md_rdev_init); 3562 /* 3563 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3564 * 3565 * mark the device faulty if: 3566 * 3567 * - the device is nonexistent (zero size) 3568 * - the device has no valid superblock 3569 * 3570 * a faulty rdev _never_ has rdev->sb set. 3571 */ 3572 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3573 { 3574 char b[BDEVNAME_SIZE]; 3575 int err; 3576 struct md_rdev *rdev; 3577 sector_t size; 3578 3579 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3580 if (!rdev) 3581 return ERR_PTR(-ENOMEM); 3582 3583 err = md_rdev_init(rdev); 3584 if (err) 3585 goto abort_free; 3586 err = alloc_disk_sb(rdev); 3587 if (err) 3588 goto abort_free; 3589 3590 err = lock_rdev(rdev, newdev, super_format == -2); 3591 if (err) 3592 goto abort_free; 3593 3594 kobject_init(&rdev->kobj, &rdev_ktype); 3595 3596 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3597 if (!size) { 3598 pr_warn("md: %s has zero or unknown size, marking faulty!\n", 3599 bdevname(rdev->bdev,b)); 3600 err = -EINVAL; 3601 goto abort_free; 3602 } 3603 3604 if (super_format >= 0) { 3605 err = super_types[super_format]. 
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			pr_warn("md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	return rdev;

abort_free:
	if (rdev->bdev)
		unlock_rdev(rdev);
	md_rdev_clear(rdev);
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */

static int analyze_sbs(struct mddev *mddev)
{
	int i;
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
				bdevname(rdev->bdev,b));
			md_kick_rdev_from_array(rdev);
		}

	/* Cannot find a valid fresh disk */
	if (!freshest) {
		pr_warn("md: cannot find a valid disk\n");
		return -EINVAL;
	}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each_safe(rdev, tmp, mddev) {
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
			pr_warn("md: %s: %s: only %d devices permitted\n",
				mdname(mddev), bdevname(rdev->bdev, b),
				mddev->max_disks);
			md_kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest) {
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				pr_warn("md: kicking non-fresh %s from array!\n",
					bdevname(rdev->bdev,b));
				md_kick_rdev_from_array(rdev);
				continue;
			}
		}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >=
			   (mddev->raid_disks - min(0, mddev->delta_disks)) &&
			   !test_bit(Journal, &rdev->flags)) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	return 0;
}

/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale',
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.'
				&& decimals < 0)) {
		if (*cp == '.')
			decimals = 0;
		else if (decimals < scale) {
			unsigned int value;
			value = *cp - '0';
			result = result * 10 + value;
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -EINVAL;
	if (decimals < 0)
		decimals = 0;
	*res = result * int_pow(10, scale - decimals);
	return 0;
}

static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
	unsigned long msec;

	if (mddev_is_clustered(mddev)) {
		pr_warn("md: Safemode is disabled for clustered mode\n");
		return -EINVAL;
	}

	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
		return -EINVAL;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		unsigned long new_delay = (msec*HZ)/1000;

		if (new_delay == 0)
			new_delay = 1;
		mddev->safemode_delay = new_delay;
		if (new_delay < old_delay || old_delay == 0)
			mod_timer(&mddev->safemode_timer, jiffies+1);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);

static ssize_t
level_show(struct mddev *mddev, char *page)
{
	struct md_personality *p;
	int ret;
	spin_lock(&mddev->lock);
	p = mddev->pers;
	if (p)
		ret = sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		ret = sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		ret = sprintf(page, "%d\n", mddev->level);
	else
		ret = 0;
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
	char clevel[16];
	ssize_t rv;
	size_t slen = len;
	struct md_personality *pers, *oldpers;
	long level;
	void *priv, *oldpriv;
	struct md_rdev *rdev;

	if (slen == 0 || slen >= sizeof(clevel))
		return -EINVAL;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;

	if (mddev->pers == NULL) {
		strncpy(mddev->clevel, buf, slen);
		if (mddev->clevel[slen-1] == '\n')
			slen--;
		mddev->clevel[slen] = 0;
		mddev->level = LEVEL_NONE;
		rv = len;
		goto out_unlock;
	}
	rv = -EROFS;
	if (mddev->ro)
		goto out_unlock;

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will be able to take over the array.
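	 *    (for example, writing "raid5" to md/level on a running raid0
	 *    array asks the raid5 personality's ->takeover() to convert the
	 *    array in place; whether any given conversion is accepted is up
	 *    to the new personality)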
3824 */ 3825 3826 rv = -EBUSY; 3827 if (mddev->sync_thread || 3828 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3829 mddev->reshape_position != MaxSector || 3830 mddev->sysfs_active) 3831 goto out_unlock; 3832 3833 rv = -EINVAL; 3834 if (!mddev->pers->quiesce) { 3835 pr_warn("md: %s: %s does not support online personality change\n", 3836 mdname(mddev), mddev->pers->name); 3837 goto out_unlock; 3838 } 3839 3840 /* Now find the new personality */ 3841 strncpy(clevel, buf, slen); 3842 if (clevel[slen-1] == '\n') 3843 slen--; 3844 clevel[slen] = 0; 3845 if (kstrtol(clevel, 10, &level)) 3846 level = LEVEL_NONE; 3847 3848 if (request_module("md-%s", clevel) != 0) 3849 request_module("md-level-%s", clevel); 3850 spin_lock(&pers_lock); 3851 pers = find_pers(level, clevel); 3852 if (!pers || !try_module_get(pers->owner)) { 3853 spin_unlock(&pers_lock); 3854 pr_warn("md: personality %s not loaded\n", clevel); 3855 rv = -EINVAL; 3856 goto out_unlock; 3857 } 3858 spin_unlock(&pers_lock); 3859 3860 if (pers == mddev->pers) { 3861 /* Nothing to do! */ 3862 module_put(pers->owner); 3863 rv = len; 3864 goto out_unlock; 3865 } 3866 if (!pers->takeover) { 3867 module_put(pers->owner); 3868 pr_warn("md: %s: %s does not support personality takeover\n", 3869 mdname(mddev), clevel); 3870 rv = -EINVAL; 3871 goto out_unlock; 3872 } 3873 3874 rdev_for_each(rdev, mddev) 3875 rdev->new_raid_disk = rdev->raid_disk; 3876 3877 /* ->takeover must set new_* and/or delta_disks 3878 * if it succeeds, and may set them when it fails. 3879 */ 3880 priv = pers->takeover(mddev); 3881 if (IS_ERR(priv)) { 3882 mddev->new_level = mddev->level; 3883 mddev->new_layout = mddev->layout; 3884 mddev->new_chunk_sectors = mddev->chunk_sectors; 3885 mddev->raid_disks -= mddev->delta_disks; 3886 mddev->delta_disks = 0; 3887 mddev->reshape_backwards = 0; 3888 module_put(pers->owner); 3889 pr_warn("md: %s: %s would not accept array\n", 3890 mdname(mddev), clevel); 3891 rv = PTR_ERR(priv); 3892 goto out_unlock; 3893 } 3894 3895 /* Looks like we have a winner */ 3896 mddev_suspend(mddev); 3897 mddev_detach(mddev); 3898 3899 spin_lock(&mddev->lock); 3900 oldpers = mddev->pers; 3901 oldpriv = mddev->private; 3902 mddev->pers = pers; 3903 mddev->private = priv; 3904 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3905 mddev->level = mddev->new_level; 3906 mddev->layout = mddev->new_layout; 3907 mddev->chunk_sectors = mddev->new_chunk_sectors; 3908 mddev->delta_disks = 0; 3909 mddev->reshape_backwards = 0; 3910 mddev->degraded = 0; 3911 spin_unlock(&mddev->lock); 3912 3913 if (oldpers->sync_request == NULL && 3914 mddev->external) { 3915 /* We are converting from a no-redundancy array 3916 * to a redundancy array and metadata is managed 3917 * externally so we need to be sure that writes 3918 * won't block due to a need to transition 3919 * clean->dirty 3920 * until external management is started. 
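 *
 * (e.g. after a takeover on an array whose metadata is maintained by
 * a userspace manager such as mdmon; the manager name is given only
 * as an illustration.)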
3921 */ 3922 mddev->in_sync = 0; 3923 mddev->safemode_delay = 0; 3924 mddev->safemode = 0; 3925 } 3926 3927 oldpers->free(mddev, oldpriv); 3928 3929 if (oldpers->sync_request == NULL && 3930 pers->sync_request != NULL) { 3931 /* need to add the md_redundancy_group */ 3932 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3933 pr_warn("md: cannot register extra attributes for %s\n", 3934 mdname(mddev)); 3935 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 3936 } 3937 if (oldpers->sync_request != NULL && 3938 pers->sync_request == NULL) { 3939 /* need to remove the md_redundancy_group */ 3940 if (mddev->to_remove == NULL) 3941 mddev->to_remove = &md_redundancy_group; 3942 } 3943 3944 module_put(oldpers->owner); 3945 3946 rdev_for_each(rdev, mddev) { 3947 if (rdev->raid_disk < 0) 3948 continue; 3949 if (rdev->new_raid_disk >= mddev->raid_disks) 3950 rdev->new_raid_disk = -1; 3951 if (rdev->new_raid_disk == rdev->raid_disk) 3952 continue; 3953 sysfs_unlink_rdev(mddev, rdev); 3954 } 3955 rdev_for_each(rdev, mddev) { 3956 if (rdev->raid_disk < 0) 3957 continue; 3958 if (rdev->new_raid_disk == rdev->raid_disk) 3959 continue; 3960 rdev->raid_disk = rdev->new_raid_disk; 3961 if (rdev->raid_disk < 0) 3962 clear_bit(In_sync, &rdev->flags); 3963 else { 3964 if (sysfs_link_rdev(mddev, rdev)) 3965 pr_warn("md: cannot register rd%d for %s after level change\n", 3966 rdev->raid_disk, mdname(mddev)); 3967 } 3968 } 3969 3970 if (pers->sync_request == NULL) { 3971 /* this is now an array without redundancy, so 3972 * it must always be in_sync 3973 */ 3974 mddev->in_sync = 1; 3975 del_timer_sync(&mddev->safemode_timer); 3976 } 3977 blk_set_stacking_limits(&mddev->queue->limits); 3978 pers->run(mddev); 3979 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 3980 mddev_resume(mddev); 3981 if (!mddev->thread) 3982 md_update_sb(mddev, 1); 3983 sysfs_notify(&mddev->kobj, NULL, "level"); 3984 md_new_event(mddev); 3985 rv = len; 3986 out_unlock: 3987 mddev_unlock(mddev); 3988 return rv; 3989 } 3990 3991 static struct md_sysfs_entry md_level = 3992 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3993 3994 static ssize_t 3995 layout_show(struct mddev *mddev, char *page) 3996 { 3997 /* just a number, not meaningful for all levels */ 3998 if (mddev->reshape_position != MaxSector && 3999 mddev->layout != mddev->new_layout) 4000 return sprintf(page, "%d (%d)\n", 4001 mddev->new_layout, mddev->layout); 4002 return sprintf(page, "%d\n", mddev->layout); 4003 } 4004 4005 static ssize_t 4006 layout_store(struct mddev *mddev, const char *buf, size_t len) 4007 { 4008 unsigned int n; 4009 int err; 4010 4011 err = kstrtouint(buf, 10, &n); 4012 if (err < 0) 4013 return err; 4014 err = mddev_lock(mddev); 4015 if (err) 4016 return err; 4017 4018 if (mddev->pers) { 4019 if (mddev->pers->check_reshape == NULL) 4020 err = -EBUSY; 4021 else if (mddev->ro) 4022 err = -EROFS; 4023 else { 4024 mddev->new_layout = n; 4025 err = mddev->pers->check_reshape(mddev); 4026 if (err) 4027 mddev->new_layout = mddev->layout; 4028 } 4029 } else { 4030 mddev->new_layout = n; 4031 if (mddev->reshape_position == MaxSector) 4032 mddev->layout = n; 4033 } 4034 mddev_unlock(mddev); 4035 return err ?: len; 4036 } 4037 static struct md_sysfs_entry md_layout = 4038 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 4039 4040 static ssize_t 4041 raid_disks_show(struct mddev *mddev, char *page) 4042 { 4043 if (mddev->raid_disks == 0) 4044 return 0; 4045 if (mddev->reshape_position != MaxSector && 4046 
mddev->delta_disks != 0) 4047 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 4048 mddev->raid_disks - mddev->delta_disks); 4049 return sprintf(page, "%d\n", mddev->raid_disks); 4050 } 4051 4052 static int update_raid_disks(struct mddev *mddev, int raid_disks); 4053 4054 static ssize_t 4055 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 4056 { 4057 unsigned int n; 4058 int err; 4059 4060 err = kstrtouint(buf, 10, &n); 4061 if (err < 0) 4062 return err; 4063 4064 err = mddev_lock(mddev); 4065 if (err) 4066 return err; 4067 if (mddev->pers) 4068 err = update_raid_disks(mddev, n); 4069 else if (mddev->reshape_position != MaxSector) { 4070 struct md_rdev *rdev; 4071 int olddisks = mddev->raid_disks - mddev->delta_disks; 4072 4073 err = -EINVAL; 4074 rdev_for_each(rdev, mddev) { 4075 if (olddisks < n && 4076 rdev->data_offset < rdev->new_data_offset) 4077 goto out_unlock; 4078 if (olddisks > n && 4079 rdev->data_offset > rdev->new_data_offset) 4080 goto out_unlock; 4081 } 4082 err = 0; 4083 mddev->delta_disks = n - olddisks; 4084 mddev->raid_disks = n; 4085 mddev->reshape_backwards = (mddev->delta_disks < 0); 4086 } else 4087 mddev->raid_disks = n; 4088 out_unlock: 4089 mddev_unlock(mddev); 4090 return err ? err : len; 4091 } 4092 static struct md_sysfs_entry md_raid_disks = 4093 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 4094 4095 static ssize_t 4096 chunk_size_show(struct mddev *mddev, char *page) 4097 { 4098 if (mddev->reshape_position != MaxSector && 4099 mddev->chunk_sectors != mddev->new_chunk_sectors) 4100 return sprintf(page, "%d (%d)\n", 4101 mddev->new_chunk_sectors << 9, 4102 mddev->chunk_sectors << 9); 4103 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 4104 } 4105 4106 static ssize_t 4107 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 4108 { 4109 unsigned long n; 4110 int err; 4111 4112 err = kstrtoul(buf, 10, &n); 4113 if (err < 0) 4114 return err; 4115 4116 err = mddev_lock(mddev); 4117 if (err) 4118 return err; 4119 if (mddev->pers) { 4120 if (mddev->pers->check_reshape == NULL) 4121 err = -EBUSY; 4122 else if (mddev->ro) 4123 err = -EROFS; 4124 else { 4125 mddev->new_chunk_sectors = n >> 9; 4126 err = mddev->pers->check_reshape(mddev); 4127 if (err) 4128 mddev->new_chunk_sectors = mddev->chunk_sectors; 4129 } 4130 } else { 4131 mddev->new_chunk_sectors = n >> 9; 4132 if (mddev->reshape_position == MaxSector) 4133 mddev->chunk_sectors = n >> 9; 4134 } 4135 mddev_unlock(mddev); 4136 return err ?: len; 4137 } 4138 static struct md_sysfs_entry md_chunk_size = 4139 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 4140 4141 static ssize_t 4142 resync_start_show(struct mddev *mddev, char *page) 4143 { 4144 if (mddev->recovery_cp == MaxSector) 4145 return sprintf(page, "none\n"); 4146 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 4147 } 4148 4149 static ssize_t 4150 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 4151 { 4152 unsigned long long n; 4153 int err; 4154 4155 if (cmd_match(buf, "none")) 4156 n = MaxSector; 4157 else { 4158 err = kstrtoull(buf, 10, &n); 4159 if (err < 0) 4160 return err; 4161 if (n != (sector_t)n) 4162 return -EINVAL; 4163 } 4164 4165 err = mddev_lock(mddev); 4166 if (err) 4167 return err; 4168 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4169 err = -EBUSY; 4170 4171 if (!err) { 4172 mddev->recovery_cp = n; 4173 if (mddev->pers) 4174 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4175 
} 4176 mddev_unlock(mddev); 4177 return err ?: len; 4178 } 4179 static struct md_sysfs_entry md_resync_start = 4180 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 4181 resync_start_show, resync_start_store); 4182 4183 /* 4184 * The array state can be: 4185 * 4186 * clear 4187 * No devices, no size, no level 4188 * Equivalent to STOP_ARRAY ioctl 4189 * inactive 4190 * May have some settings, but array is not active 4191 * all IO results in error 4192 * When written, doesn't tear down array, but just stops it 4193 * suspended (not supported yet) 4194 * All IO requests will block. The array can be reconfigured. 4195 * Writing this, if accepted, will block until array is quiescent 4196 * readonly 4197 * no resync can happen. no superblocks get written. 4198 * write requests fail 4199 * read-auto 4200 * like readonly, but behaves like 'clean' on a write request. 4201 * 4202 * clean - no pending writes, but otherwise active. 4203 * When written to inactive array, starts without resync 4204 * If a write request arrives then 4205 * if metadata is known, mark 'dirty' and switch to 'active'. 4206 * if not known, block and switch to write-pending 4207 * If written to an active array that has pending writes, then fails. 4208 * active 4209 * fully active: IO and resync can be happening. 4210 * When written to inactive array, starts with resync 4211 * 4212 * write-pending 4213 * clean, but writes are blocked waiting for 'active' to be written. 4214 * 4215 * active-idle 4216 * like active, but no writes have been seen for a while (100msec). 4217 * 4218 * broken 4219 * RAID0/LINEAR-only: same as clean, but array is missing a member. 4220 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped 4221 * when a member is gone, so this state will at least alert the 4222 * user that something is wrong. 
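 *
 * For example, writing "inactive" to an active array stops it without
 * tearing it down, while writing "clear" is equivalent to STOP_ARRAY;
 * see array_state_store() below.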
4223 */ 4224 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4225 write_pending, active_idle, broken, bad_word}; 4226 static char *array_states[] = { 4227 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4228 "write-pending", "active-idle", "broken", NULL }; 4229 4230 static int match_word(const char *word, char **list) 4231 { 4232 int n; 4233 for (n=0; list[n]; n++) 4234 if (cmd_match(word, list[n])) 4235 break; 4236 return n; 4237 } 4238 4239 static ssize_t 4240 array_state_show(struct mddev *mddev, char *page) 4241 { 4242 enum array_state st = inactive; 4243 4244 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { 4245 switch(mddev->ro) { 4246 case 1: 4247 st = readonly; 4248 break; 4249 case 2: 4250 st = read_auto; 4251 break; 4252 case 0: 4253 spin_lock(&mddev->lock); 4254 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4255 st = write_pending; 4256 else if (mddev->in_sync) 4257 st = clean; 4258 else if (mddev->safemode) 4259 st = active_idle; 4260 else 4261 st = active; 4262 spin_unlock(&mddev->lock); 4263 } 4264 4265 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) 4266 st = broken; 4267 } else { 4268 if (list_empty(&mddev->disks) && 4269 mddev->raid_disks == 0 && 4270 mddev->dev_sectors == 0) 4271 st = clear; 4272 else 4273 st = inactive; 4274 } 4275 return sprintf(page, "%s\n", array_states[st]); 4276 } 4277 4278 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 4279 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 4280 static int do_md_run(struct mddev *mddev); 4281 static int restart_array(struct mddev *mddev); 4282 4283 static ssize_t 4284 array_state_store(struct mddev *mddev, const char *buf, size_t len) 4285 { 4286 int err = 0; 4287 enum array_state st = match_word(buf, array_states); 4288 4289 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 4290 /* don't take reconfig_mutex when toggling between 4291 * clean and active 4292 */ 4293 spin_lock(&mddev->lock); 4294 if (st == active) { 4295 restart_array(mddev); 4296 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4297 md_wakeup_thread(mddev->thread); 4298 wake_up(&mddev->sb_wait); 4299 } else /* st == clean */ { 4300 restart_array(mddev); 4301 if (!set_in_sync(mddev)) 4302 err = -EBUSY; 4303 } 4304 if (!err) 4305 sysfs_notify_dirent_safe(mddev->sysfs_state); 4306 spin_unlock(&mddev->lock); 4307 return err ?: len; 4308 } 4309 err = mddev_lock(mddev); 4310 if (err) 4311 return err; 4312 err = -EINVAL; 4313 switch(st) { 4314 case bad_word: 4315 break; 4316 case clear: 4317 /* stopping an active array */ 4318 err = do_md_stop(mddev, 0, NULL); 4319 break; 4320 case inactive: 4321 /* stopping an active array */ 4322 if (mddev->pers) 4323 err = do_md_stop(mddev, 2, NULL); 4324 else 4325 err = 0; /* already inactive */ 4326 break; 4327 case suspended: 4328 break; /* not supported yet */ 4329 case readonly: 4330 if (mddev->pers) 4331 err = md_set_readonly(mddev, NULL); 4332 else { 4333 mddev->ro = 1; 4334 set_disk_ro(mddev->gendisk, 1); 4335 err = do_md_run(mddev); 4336 } 4337 break; 4338 case read_auto: 4339 if (mddev->pers) { 4340 if (mddev->ro == 0) 4341 err = md_set_readonly(mddev, NULL); 4342 else if (mddev->ro == 1) 4343 err = restart_array(mddev); 4344 if (err == 0) { 4345 mddev->ro = 2; 4346 set_disk_ro(mddev->gendisk, 0); 4347 } 4348 } else { 4349 mddev->ro = 2; 4350 err = do_md_run(mddev); 4351 } 4352 break; 4353 case clean: 4354 if (mddev->pers) { 4355 err = 
restart_array(mddev); 4356 if (err) 4357 break; 4358 spin_lock(&mddev->lock); 4359 if (!set_in_sync(mddev)) 4360 err = -EBUSY; 4361 spin_unlock(&mddev->lock); 4362 } else 4363 err = -EINVAL; 4364 break; 4365 case active: 4366 if (mddev->pers) { 4367 err = restart_array(mddev); 4368 if (err) 4369 break; 4370 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4371 wake_up(&mddev->sb_wait); 4372 err = 0; 4373 } else { 4374 mddev->ro = 0; 4375 set_disk_ro(mddev->gendisk, 0); 4376 err = do_md_run(mddev); 4377 } 4378 break; 4379 case write_pending: 4380 case active_idle: 4381 case broken: 4382 /* these cannot be set */ 4383 break; 4384 } 4385 4386 if (!err) { 4387 if (mddev->hold_active == UNTIL_IOCTL) 4388 mddev->hold_active = 0; 4389 sysfs_notify_dirent_safe(mddev->sysfs_state); 4390 } 4391 mddev_unlock(mddev); 4392 return err ?: len; 4393 } 4394 static struct md_sysfs_entry md_array_state = 4395 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 4396 4397 static ssize_t 4398 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 4399 return sprintf(page, "%d\n", 4400 atomic_read(&mddev->max_corr_read_errors)); 4401 } 4402 4403 static ssize_t 4404 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 4405 { 4406 unsigned int n; 4407 int rv; 4408 4409 rv = kstrtouint(buf, 10, &n); 4410 if (rv < 0) 4411 return rv; 4412 atomic_set(&mddev->max_corr_read_errors, n); 4413 return len; 4414 } 4415 4416 static struct md_sysfs_entry max_corr_read_errors = 4417 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 4418 max_corrected_read_errors_store); 4419 4420 static ssize_t 4421 null_show(struct mddev *mddev, char *page) 4422 { 4423 return -EINVAL; 4424 } 4425 4426 static ssize_t 4427 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 4428 { 4429 /* buf must be %d:%d\n? giving major and minor numbers */ 4430 /* The new device is added to the array. 4431 * If the array has a persistent superblock, we read the 4432 * superblock to initialise info and check validity. 4433 * Otherwise, only checking done is that in bind_rdev_to_array, 4434 * which mainly checks size. 4435 */ 4436 char *e; 4437 int major = simple_strtoul(buf, &e, 10); 4438 int minor; 4439 dev_t dev; 4440 struct md_rdev *rdev; 4441 int err; 4442 4443 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 4444 return -EINVAL; 4445 minor = simple_strtoul(e+1, &e, 10); 4446 if (*e && *e != '\n') 4447 return -EINVAL; 4448 dev = MKDEV(major, minor); 4449 if (major != MAJOR(dev) || 4450 minor != MINOR(dev)) 4451 return -EOVERFLOW; 4452 4453 flush_workqueue(md_misc_wq); 4454 4455 err = mddev_lock(mddev); 4456 if (err) 4457 return err; 4458 if (mddev->persistent) { 4459 rdev = md_import_device(dev, mddev->major_version, 4460 mddev->minor_version); 4461 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 4462 struct md_rdev *rdev0 4463 = list_entry(mddev->disks.next, 4464 struct md_rdev, same_set); 4465 err = super_types[mddev->major_version] 4466 .load_super(rdev, rdev0, mddev->minor_version); 4467 if (err < 0) 4468 goto out; 4469 } 4470 } else if (mddev->external) 4471 rdev = md_import_device(dev, -2, -1); 4472 else 4473 rdev = md_import_device(dev, -1, -1); 4474 4475 if (IS_ERR(rdev)) { 4476 mddev_unlock(mddev); 4477 return PTR_ERR(rdev); 4478 } 4479 err = bind_rdev_to_array(rdev, mddev); 4480 out: 4481 if (err) 4482 export_rdev(rdev); 4483 mddev_unlock(mddev); 4484 if (!err) 4485 md_new_event(mddev); 4486 return err ? 
err : len;
4487 }
4488
4489 static struct md_sysfs_entry md_new_device =
4490 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
4491
4492 static ssize_t
4493 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4494 {
4495 char *end;
4496 unsigned long chunk, end_chunk;
4497 int err;
4498
4499 err = mddev_lock(mddev);
4500 if (err)
4501 return err;
4502 if (!mddev->bitmap)
4503 goto out;
4504 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4505 while (*buf) {
4506 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4507 if (buf == end) break;
4508 if (*end == '-') { /* range */
4509 buf = end + 1;
4510 end_chunk = simple_strtoul(buf, &end, 0);
4511 if (buf == end) break;
4512 }
4513 if (*end && !isspace(*end)) break;
4514 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4515 buf = skip_spaces(end);
4516 }
4517 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4518 out:
4519 mddev_unlock(mddev);
4520 return len;
4521 }
4522
4523 static struct md_sysfs_entry md_bitmap =
4524 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
4525
4526 static ssize_t
4527 size_show(struct mddev *mddev, char *page)
4528 {
4529 return sprintf(page, "%llu\n",
4530 (unsigned long long)mddev->dev_sectors / 2);
4531 }
4532
4533 static int update_size(struct mddev *mddev, sector_t num_sectors);
4534
4535 static ssize_t
4536 size_store(struct mddev *mddev, const char *buf, size_t len)
4537 {
4538 /* If array is inactive, we can reduce the component size, but
4539 * not increase it (except from 0).
4540 * If array is active, we can try an on-line resize
4541 */
4542 sector_t sectors;
4543 int err = strict_blocks_to_sectors(buf, &sectors);
4544
4545 if (err < 0)
4546 return err;
4547 err = mddev_lock(mddev);
4548 if (err)
4549 return err;
4550 if (mddev->pers) {
4551 err = update_size(mddev, sectors);
4552 if (err == 0)
4553 md_update_sb(mddev, 1);
4554 } else {
4555 if (mddev->dev_sectors == 0 ||
4556 mddev->dev_sectors > sectors)
4557 mddev->dev_sectors = sectors;
4558 else
4559 err = -ENOSPC;
4560 }
4561 mddev_unlock(mddev);
4562 return err ? err : len;
4563 }
4564
4565 static struct md_sysfs_entry md_size =
4566 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4567
4568 /* Metadata version.
4569 * This is one of
4570 * 'none' for arrays with no metadata (good luck...)
4571 * 'external' for arrays with externally managed metadata,
4572 * or N.M for internally known formats
4573 */
4574 static ssize_t
4575 metadata_show(struct mddev *mddev, char *page)
4576 {
4577 if (mddev->persistent)
4578 return sprintf(page, "%d.%d\n",
4579 mddev->major_version, mddev->minor_version);
4580 else if (mddev->external)
4581 return sprintf(page, "external:%s\n", mddev->metadata_type);
4582 else
4583 return sprintf(page, "none\n");
4584 }
4585
4586 static ssize_t
4587 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4588 {
4589 int major, minor;
4590 char *e;
4591 int err;
4592 /* Changing the details of 'external' metadata is
4593 * always permitted. Otherwise there must be
4594 * no devices attached to the array.
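 *
 * e.g. writing "1.2" selects that internal format, writing
 * "external:imsm" (type name illustrative) records an externally
 * managed type, and writing "none" marks the array as having no
 * persistent metadata.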
4595 */ 4596 4597 err = mddev_lock(mddev); 4598 if (err) 4599 return err; 4600 err = -EBUSY; 4601 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4602 ; 4603 else if (!list_empty(&mddev->disks)) 4604 goto out_unlock; 4605 4606 err = 0; 4607 if (cmd_match(buf, "none")) { 4608 mddev->persistent = 0; 4609 mddev->external = 0; 4610 mddev->major_version = 0; 4611 mddev->minor_version = 90; 4612 goto out_unlock; 4613 } 4614 if (strncmp(buf, "external:", 9) == 0) { 4615 size_t namelen = len-9; 4616 if (namelen >= sizeof(mddev->metadata_type)) 4617 namelen = sizeof(mddev->metadata_type)-1; 4618 strncpy(mddev->metadata_type, buf+9, namelen); 4619 mddev->metadata_type[namelen] = 0; 4620 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4621 mddev->metadata_type[--namelen] = 0; 4622 mddev->persistent = 0; 4623 mddev->external = 1; 4624 mddev->major_version = 0; 4625 mddev->minor_version = 90; 4626 goto out_unlock; 4627 } 4628 major = simple_strtoul(buf, &e, 10); 4629 err = -EINVAL; 4630 if (e==buf || *e != '.') 4631 goto out_unlock; 4632 buf = e+1; 4633 minor = simple_strtoul(buf, &e, 10); 4634 if (e==buf || (*e && *e != '\n') ) 4635 goto out_unlock; 4636 err = -ENOENT; 4637 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4638 goto out_unlock; 4639 mddev->major_version = major; 4640 mddev->minor_version = minor; 4641 mddev->persistent = 1; 4642 mddev->external = 0; 4643 err = 0; 4644 out_unlock: 4645 mddev_unlock(mddev); 4646 return err ?: len; 4647 } 4648 4649 static struct md_sysfs_entry md_metadata = 4650 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4651 4652 static ssize_t 4653 action_show(struct mddev *mddev, char *page) 4654 { 4655 char *type = "idle"; 4656 unsigned long recovery = mddev->recovery; 4657 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 4658 type = "frozen"; 4659 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4660 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4661 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4662 type = "reshape"; 4663 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4664 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4665 type = "resync"; 4666 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4667 type = "check"; 4668 else 4669 type = "repair"; 4670 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4671 type = "recover"; 4672 else if (mddev->reshape_position != MaxSector) 4673 type = "reshape"; 4674 } 4675 return sprintf(page, "%s\n", type); 4676 } 4677 4678 static ssize_t 4679 action_store(struct mddev *mddev, const char *page, size_t len) 4680 { 4681 if (!mddev->pers || !mddev->pers->sync_request) 4682 return -EINVAL; 4683 4684 4685 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4686 if (cmd_match(page, "frozen")) 4687 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4688 else 4689 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4690 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4691 mddev_lock(mddev) == 0) { 4692 flush_workqueue(md_misc_wq); 4693 if (mddev->sync_thread) { 4694 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4695 md_reap_sync_thread(mddev); 4696 } 4697 mddev_unlock(mddev); 4698 } 4699 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4700 return -EBUSY; 4701 else if (cmd_match(page, "resync")) 4702 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4703 else if (cmd_match(page, "recover")) { 4704 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4705 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4706 } else if 
(cmd_match(page, "reshape")) { 4707 int err; 4708 if (mddev->pers->start_reshape == NULL) 4709 return -EINVAL; 4710 err = mddev_lock(mddev); 4711 if (!err) { 4712 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4713 err = -EBUSY; 4714 else { 4715 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4716 err = mddev->pers->start_reshape(mddev); 4717 } 4718 mddev_unlock(mddev); 4719 } 4720 if (err) 4721 return err; 4722 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4723 } else { 4724 if (cmd_match(page, "check")) 4725 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4726 else if (!cmd_match(page, "repair")) 4727 return -EINVAL; 4728 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4729 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4730 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4731 } 4732 if (mddev->ro == 2) { 4733 /* A write to sync_action is enough to justify 4734 * canceling read-auto mode 4735 */ 4736 mddev->ro = 0; 4737 md_wakeup_thread(mddev->sync_thread); 4738 } 4739 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4740 md_wakeup_thread(mddev->thread); 4741 sysfs_notify_dirent_safe(mddev->sysfs_action); 4742 return len; 4743 } 4744 4745 static struct md_sysfs_entry md_scan_mode = 4746 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4747 4748 static ssize_t 4749 last_sync_action_show(struct mddev *mddev, char *page) 4750 { 4751 return sprintf(page, "%s\n", mddev->last_sync_action); 4752 } 4753 4754 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4755 4756 static ssize_t 4757 mismatch_cnt_show(struct mddev *mddev, char *page) 4758 { 4759 return sprintf(page, "%llu\n", 4760 (unsigned long long) 4761 atomic64_read(&mddev->resync_mismatches)); 4762 } 4763 4764 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4765 4766 static ssize_t 4767 sync_min_show(struct mddev *mddev, char *page) 4768 { 4769 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4770 mddev->sync_speed_min ? "local": "system"); 4771 } 4772 4773 static ssize_t 4774 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4775 { 4776 unsigned int min; 4777 int rv; 4778 4779 if (strncmp(buf, "system", 6)==0) { 4780 min = 0; 4781 } else { 4782 rv = kstrtouint(buf, 10, &min); 4783 if (rv < 0) 4784 return rv; 4785 if (min == 0) 4786 return -EINVAL; 4787 } 4788 mddev->sync_speed_min = min; 4789 return len; 4790 } 4791 4792 static struct md_sysfs_entry md_sync_min = 4793 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4794 4795 static ssize_t 4796 sync_max_show(struct mddev *mddev, char *page) 4797 { 4798 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4799 mddev->sync_speed_max ? 
"local": "system"); 4800 } 4801 4802 static ssize_t 4803 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4804 { 4805 unsigned int max; 4806 int rv; 4807 4808 if (strncmp(buf, "system", 6)==0) { 4809 max = 0; 4810 } else { 4811 rv = kstrtouint(buf, 10, &max); 4812 if (rv < 0) 4813 return rv; 4814 if (max == 0) 4815 return -EINVAL; 4816 } 4817 mddev->sync_speed_max = max; 4818 return len; 4819 } 4820 4821 static struct md_sysfs_entry md_sync_max = 4822 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4823 4824 static ssize_t 4825 degraded_show(struct mddev *mddev, char *page) 4826 { 4827 return sprintf(page, "%d\n", mddev->degraded); 4828 } 4829 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4830 4831 static ssize_t 4832 sync_force_parallel_show(struct mddev *mddev, char *page) 4833 { 4834 return sprintf(page, "%d\n", mddev->parallel_resync); 4835 } 4836 4837 static ssize_t 4838 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4839 { 4840 long n; 4841 4842 if (kstrtol(buf, 10, &n)) 4843 return -EINVAL; 4844 4845 if (n != 0 && n != 1) 4846 return -EINVAL; 4847 4848 mddev->parallel_resync = n; 4849 4850 if (mddev->sync_thread) 4851 wake_up(&resync_wait); 4852 4853 return len; 4854 } 4855 4856 /* force parallel resync, even with shared block devices */ 4857 static struct md_sysfs_entry md_sync_force_parallel = 4858 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4859 sync_force_parallel_show, sync_force_parallel_store); 4860 4861 static ssize_t 4862 sync_speed_show(struct mddev *mddev, char *page) 4863 { 4864 unsigned long resync, dt, db; 4865 if (mddev->curr_resync == 0) 4866 return sprintf(page, "none\n"); 4867 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4868 dt = (jiffies - mddev->resync_mark) / HZ; 4869 if (!dt) dt++; 4870 db = resync - mddev->resync_mark_cnt; 4871 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4872 } 4873 4874 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4875 4876 static ssize_t 4877 sync_completed_show(struct mddev *mddev, char *page) 4878 { 4879 unsigned long long max_sectors, resync; 4880 4881 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4882 return sprintf(page, "none\n"); 4883 4884 if (mddev->curr_resync == 1 || 4885 mddev->curr_resync == 2) 4886 return sprintf(page, "delayed\n"); 4887 4888 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4889 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4890 max_sectors = mddev->resync_max_sectors; 4891 else 4892 max_sectors = mddev->dev_sectors; 4893 4894 resync = mddev->curr_resync_completed; 4895 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4896 } 4897 4898 static struct md_sysfs_entry md_sync_completed = 4899 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); 4900 4901 static ssize_t 4902 min_sync_show(struct mddev *mddev, char *page) 4903 { 4904 return sprintf(page, "%llu\n", 4905 (unsigned long long)mddev->resync_min); 4906 } 4907 static ssize_t 4908 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4909 { 4910 unsigned long long min; 4911 int err; 4912 4913 if (kstrtoull(buf, 10, &min)) 4914 return -EINVAL; 4915 4916 spin_lock(&mddev->lock); 4917 err = -EINVAL; 4918 if (min > mddev->resync_max) 4919 goto out_unlock; 4920 4921 err = -EBUSY; 4922 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4923 goto out_unlock; 4924 4925 /* Round down to multiple of 4K for safety */ 4926 mddev->resync_min = round_down(min, 8); 4927 err = 0; 
4928 4929 out_unlock: 4930 spin_unlock(&mddev->lock); 4931 return err ?: len; 4932 } 4933 4934 static struct md_sysfs_entry md_min_sync = 4935 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4936 4937 static ssize_t 4938 max_sync_show(struct mddev *mddev, char *page) 4939 { 4940 if (mddev->resync_max == MaxSector) 4941 return sprintf(page, "max\n"); 4942 else 4943 return sprintf(page, "%llu\n", 4944 (unsigned long long)mddev->resync_max); 4945 } 4946 static ssize_t 4947 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4948 { 4949 int err; 4950 spin_lock(&mddev->lock); 4951 if (strncmp(buf, "max", 3) == 0) 4952 mddev->resync_max = MaxSector; 4953 else { 4954 unsigned long long max; 4955 int chunk; 4956 4957 err = -EINVAL; 4958 if (kstrtoull(buf, 10, &max)) 4959 goto out_unlock; 4960 if (max < mddev->resync_min) 4961 goto out_unlock; 4962 4963 err = -EBUSY; 4964 if (max < mddev->resync_max && 4965 mddev->ro == 0 && 4966 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4967 goto out_unlock; 4968 4969 /* Must be a multiple of chunk_size */ 4970 chunk = mddev->chunk_sectors; 4971 if (chunk) { 4972 sector_t temp = max; 4973 4974 err = -EINVAL; 4975 if (sector_div(temp, chunk)) 4976 goto out_unlock; 4977 } 4978 mddev->resync_max = max; 4979 } 4980 wake_up(&mddev->recovery_wait); 4981 err = 0; 4982 out_unlock: 4983 spin_unlock(&mddev->lock); 4984 return err ?: len; 4985 } 4986 4987 static struct md_sysfs_entry md_max_sync = 4988 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4989 4990 static ssize_t 4991 suspend_lo_show(struct mddev *mddev, char *page) 4992 { 4993 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4994 } 4995 4996 static ssize_t 4997 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4998 { 4999 unsigned long long new; 5000 int err; 5001 5002 err = kstrtoull(buf, 10, &new); 5003 if (err < 0) 5004 return err; 5005 if (new != (sector_t)new) 5006 return -EINVAL; 5007 5008 err = mddev_lock(mddev); 5009 if (err) 5010 return err; 5011 err = -EINVAL; 5012 if (mddev->pers == NULL || 5013 mddev->pers->quiesce == NULL) 5014 goto unlock; 5015 mddev_suspend(mddev); 5016 mddev->suspend_lo = new; 5017 mddev_resume(mddev); 5018 5019 err = 0; 5020 unlock: 5021 mddev_unlock(mddev); 5022 return err ?: len; 5023 } 5024 static struct md_sysfs_entry md_suspend_lo = 5025 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 5026 5027 static ssize_t 5028 suspend_hi_show(struct mddev *mddev, char *page) 5029 { 5030 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 5031 } 5032 5033 static ssize_t 5034 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 5035 { 5036 unsigned long long new; 5037 int err; 5038 5039 err = kstrtoull(buf, 10, &new); 5040 if (err < 0) 5041 return err; 5042 if (new != (sector_t)new) 5043 return -EINVAL; 5044 5045 err = mddev_lock(mddev); 5046 if (err) 5047 return err; 5048 err = -EINVAL; 5049 if (mddev->pers == NULL) 5050 goto unlock; 5051 5052 mddev_suspend(mddev); 5053 mddev->suspend_hi = new; 5054 mddev_resume(mddev); 5055 5056 err = 0; 5057 unlock: 5058 mddev_unlock(mddev); 5059 return err ?: len; 5060 } 5061 static struct md_sysfs_entry md_suspend_hi = 5062 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 5063 5064 static ssize_t 5065 reshape_position_show(struct mddev *mddev, char *page) 5066 { 5067 if (mddev->reshape_position != MaxSector) 5068 return sprintf(page, "%llu\n", 5069 (unsigned long 
long)mddev->reshape_position);
5070 strcpy(page, "none\n");
5071 return 5;
5072 }
5073
5074 static ssize_t
5075 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5076 {
5077 struct md_rdev *rdev;
5078 unsigned long long new;
5079 int err;
5080
5081 err = kstrtoull(buf, 10, &new);
5082 if (err < 0)
5083 return err;
5084 if (new != (sector_t)new)
5085 return -EINVAL;
5086 err = mddev_lock(mddev);
5087 if (err)
5088 return err;
5089 err = -EBUSY;
5090 if (mddev->pers)
5091 goto unlock;
5092 mddev->reshape_position = new;
5093 mddev->delta_disks = 0;
5094 mddev->reshape_backwards = 0;
5095 mddev->new_level = mddev->level;
5096 mddev->new_layout = mddev->layout;
5097 mddev->new_chunk_sectors = mddev->chunk_sectors;
5098 rdev_for_each(rdev, mddev)
5099 rdev->new_data_offset = rdev->data_offset;
5100 err = 0;
5101 unlock:
5102 mddev_unlock(mddev);
5103 return err ?: len;
5104 }
5105
5106 static struct md_sysfs_entry md_reshape_position =
5107 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5108 reshape_position_store);
5109
5110 static ssize_t
5111 reshape_direction_show(struct mddev *mddev, char *page)
5112 {
5113 return sprintf(page, "%s\n",
5114 mddev->reshape_backwards ? "backwards" : "forwards");
5115 }
5116
5117 static ssize_t
5118 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5119 {
5120 int backwards = 0;
5121 int err;
5122
5123 if (cmd_match(buf, "forwards"))
5124 backwards = 0;
5125 else if (cmd_match(buf, "backwards"))
5126 backwards = 1;
5127 else
5128 return -EINVAL;
5129 if (mddev->reshape_backwards == backwards)
5130 return len;
5131
5132 err = mddev_lock(mddev);
5133 if (err)
5134 return err;
5135 /* check if we are allowed to change */
5136 if (mddev->delta_disks)
5137 err = -EBUSY;
5138 else if (mddev->persistent &&
5139 mddev->major_version == 0)
5140 err = -EINVAL;
5141 else
5142 mddev->reshape_backwards = backwards;
5143 mddev_unlock(mddev);
5144 return err ?: len;
5145 }
5146
5147 static struct md_sysfs_entry md_reshape_direction =
5148 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5149 reshape_direction_store);
5150
5151 static ssize_t
5152 array_size_show(struct mddev *mddev, char *page)
5153 {
5154 if (mddev->external_size)
5155 return sprintf(page, "%llu\n",
5156 (unsigned long long)mddev->array_sectors/2);
5157 else
5158 return sprintf(page, "default\n");
5159 }
5160
5161 static ssize_t
5162 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5163 {
5164 sector_t sectors;
5165 int err;
5166
5167 err = mddev_lock(mddev);
5168 if (err)
5169 return err;
5170
5171 /* clustered raid doesn't support changing array_sectors */
5172 if (mddev_is_clustered(mddev)) {
5173 mddev_unlock(mddev);
5174 return -EINVAL;
5175 }
5176
5177 if (strncmp(buf, "default", 7) == 0) {
5178 if (mddev->pers)
5179 sectors = mddev->pers->size(mddev, 0, 0);
5180 else
5181 sectors = mddev->array_sectors;
5182
5183 mddev->external_size = 0;
5184 } else {
5185 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5186 err = -EINVAL;
5187 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5188 err = -E2BIG;
5189 else
5190 mddev->external_size = 1;
5191 }
5192
5193 if (!err) {
5194 mddev->array_sectors = sectors;
5195 if (mddev->pers) {
5196 set_capacity(mddev->gendisk, mddev->array_sectors);
5197 revalidate_disk(mddev->gendisk);
5198 }
5199 }
5200 mddev_unlock(mddev);
5201 return err ?: len;
5202 }
5203
5204 static struct md_sysfs_entry md_array_size =
5205 __ATTR(array_size, S_IRUGO|S_IWUSR,
array_size_show,
5206 array_size_store);
5207
5208 static ssize_t
5209 consistency_policy_show(struct mddev *mddev, char *page)
5210 {
5211 int ret;
5212
5213 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5214 ret = sprintf(page, "journal\n");
5215 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5216 ret = sprintf(page, "ppl\n");
5217 } else if (mddev->bitmap) {
5218 ret = sprintf(page, "bitmap\n");
5219 } else if (mddev->pers) {
5220 if (mddev->pers->sync_request)
5221 ret = sprintf(page, "resync\n");
5222 else
5223 ret = sprintf(page, "none\n");
5224 } else {
5225 ret = sprintf(page, "unknown\n");
5226 }
5227
5228 return ret;
5229 }
5230
5231 static ssize_t
5232 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5233 {
5234 int err = 0;
5235
5236 if (mddev->pers) {
5237 if (mddev->pers->change_consistency_policy)
5238 err = mddev->pers->change_consistency_policy(mddev, buf);
5239 else
5240 err = -EBUSY;
5241 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5242 set_bit(MD_HAS_PPL, &mddev->flags);
5243 } else {
5244 err = -EINVAL;
5245 }
5246
5247 return err ? err : len;
5248 }
5249
5250 static struct md_sysfs_entry md_consistency_policy =
5251 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5252 consistency_policy_store);
5253
5254 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5255 {
5256 return sprintf(page, "%d\n", mddev->fail_last_dev);
5257 }
5258
5259 /*
5260 * Setting fail_last_dev to true allows the last device to be forcibly
5261 * removed from RAID1/RAID10.
5262 */
5263 static ssize_t
5264 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5265 {
5266 int ret;
5267 bool value;
5268
5269 ret = kstrtobool(buf, &value);
5270 if (ret)
5271 return ret;
5272
5273 if (value != mddev->fail_last_dev)
5274 mddev->fail_last_dev = value;
5275
5276 return len;
5277 }
5278 static struct md_sysfs_entry md_fail_last_dev =
5279 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5280 fail_last_dev_store);
5281
5282 static struct attribute *md_default_attrs[] = {
5283 &md_level.attr,
5284 &md_layout.attr,
5285 &md_raid_disks.attr,
5286 &md_chunk_size.attr,
5287 &md_size.attr,
5288 &md_resync_start.attr,
5289 &md_metadata.attr,
5290 &md_new_device.attr,
5291 &md_safe_delay.attr,
5292 &md_array_state.attr,
5293 &md_reshape_position.attr,
5294 &md_reshape_direction.attr,
5295 &md_array_size.attr,
5296 &max_corr_read_errors.attr,
5297 &md_consistency_policy.attr,
5298 &md_fail_last_dev.attr,
5299 NULL,
5300 };
5301
5302 static struct attribute *md_redundancy_attrs[] = {
5303 &md_scan_mode.attr,
5304 &md_last_scan_mode.attr,
5305 &md_mismatches.attr,
5306 &md_sync_min.attr,
5307 &md_sync_max.attr,
5308 &md_sync_speed.attr,
5309 &md_sync_force_parallel.attr,
5310 &md_sync_completed.attr,
5311 &md_min_sync.attr,
5312 &md_max_sync.attr,
5313 &md_suspend_lo.attr,
5314 &md_suspend_hi.attr,
5315 &md_bitmap.attr,
5316 &md_degraded.attr,
5317 NULL,
5318 };
5319 static struct attribute_group md_redundancy_group = {
5320 .name = NULL,
5321 .attrs = md_redundancy_attrs,
5322 };
5323
5324 static ssize_t
5325 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5326 {
5327 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5328 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5329 ssize_t rv;
5330
5331 if (!entry->show)
5332 return -EIO;
5333 spin_lock(&all_mddevs_lock);
5334 if (list_empty(&mddev->all_mddevs)) {
5335 spin_unlock(&all_mddevs_lock);
5336 return -EBUSY; 5337 } 5338 mddev_get(mddev); 5339 spin_unlock(&all_mddevs_lock); 5340 5341 rv = entry->show(mddev, page); 5342 mddev_put(mddev); 5343 return rv; 5344 } 5345 5346 static ssize_t 5347 md_attr_store(struct kobject *kobj, struct attribute *attr, 5348 const char *page, size_t length) 5349 { 5350 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5351 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5352 ssize_t rv; 5353 5354 if (!entry->store) 5355 return -EIO; 5356 if (!capable(CAP_SYS_ADMIN)) 5357 return -EACCES; 5358 spin_lock(&all_mddevs_lock); 5359 if (list_empty(&mddev->all_mddevs)) { 5360 spin_unlock(&all_mddevs_lock); 5361 return -EBUSY; 5362 } 5363 mddev_get(mddev); 5364 spin_unlock(&all_mddevs_lock); 5365 rv = entry->store(mddev, page, length); 5366 mddev_put(mddev); 5367 return rv; 5368 } 5369 5370 static void md_free(struct kobject *ko) 5371 { 5372 struct mddev *mddev = container_of(ko, struct mddev, kobj); 5373 5374 if (mddev->sysfs_state) 5375 sysfs_put(mddev->sysfs_state); 5376 5377 if (mddev->gendisk) 5378 del_gendisk(mddev->gendisk); 5379 if (mddev->queue) 5380 blk_cleanup_queue(mddev->queue); 5381 if (mddev->gendisk) 5382 put_disk(mddev->gendisk); 5383 percpu_ref_exit(&mddev->writes_pending); 5384 5385 bioset_exit(&mddev->bio_set); 5386 bioset_exit(&mddev->sync_set); 5387 kfree(mddev); 5388 } 5389 5390 static const struct sysfs_ops md_sysfs_ops = { 5391 .show = md_attr_show, 5392 .store = md_attr_store, 5393 }; 5394 static struct kobj_type md_ktype = { 5395 .release = md_free, 5396 .sysfs_ops = &md_sysfs_ops, 5397 .default_attrs = md_default_attrs, 5398 }; 5399 5400 int mdp_major = 0; 5401 5402 static void mddev_delayed_delete(struct work_struct *ws) 5403 { 5404 struct mddev *mddev = container_of(ws, struct mddev, del_work); 5405 5406 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 5407 kobject_del(&mddev->kobj); 5408 kobject_put(&mddev->kobj); 5409 } 5410 5411 static void no_op(struct percpu_ref *r) {} 5412 5413 int mddev_init_writes_pending(struct mddev *mddev) 5414 { 5415 if (mddev->writes_pending.percpu_count_ptr) 5416 return 0; 5417 if (percpu_ref_init(&mddev->writes_pending, no_op, 5418 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0) 5419 return -ENOMEM; 5420 /* We want to start with the refcount at zero */ 5421 percpu_ref_put(&mddev->writes_pending); 5422 return 0; 5423 } 5424 EXPORT_SYMBOL_GPL(mddev_init_writes_pending); 5425 5426 static int md_alloc(dev_t dev, char *name) 5427 { 5428 /* 5429 * If dev is zero, name is the name of a device to allocate with 5430 * an arbitrary minor number. It will be "md_???" 5431 * If dev is non-zero it must be a device number with a MAJOR of 5432 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then 5433 * the device is being created by opening a node in /dev. 5434 * If "name" is not NULL, the device is being created by 5435 * writing to /sys/module/md_mod/parameters/new_array. 5436 */ 5437 static DEFINE_MUTEX(disks_mutex); 5438 struct mddev *mddev = mddev_find(dev); 5439 struct gendisk *disk; 5440 int partitioned; 5441 int shift; 5442 int unit; 5443 int error; 5444 5445 if (!mddev) 5446 return -ENODEV; 5447 5448 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 5449 shift = partitioned ? MdpMinorShift : 0; 5450 unit = MINOR(mddev->unit) >> shift; 5451 5452 /* wait for any previous instance of this device to be 5453 * completely removed (mddev_delayed_delete). 
5454 */ 5455 flush_workqueue(md_misc_wq); 5456 5457 mutex_lock(&disks_mutex); 5458 error = -EEXIST; 5459 if (mddev->gendisk) 5460 goto abort; 5461 5462 if (name && !dev) { 5463 /* Need to ensure that 'name' is not a duplicate. 5464 */ 5465 struct mddev *mddev2; 5466 spin_lock(&all_mddevs_lock); 5467 5468 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 5469 if (mddev2->gendisk && 5470 strcmp(mddev2->gendisk->disk_name, name) == 0) { 5471 spin_unlock(&all_mddevs_lock); 5472 goto abort; 5473 } 5474 spin_unlock(&all_mddevs_lock); 5475 } 5476 if (name && dev) 5477 /* 5478 * Creating /dev/mdNNN via "newarray", so adjust hold_active. 5479 */ 5480 mddev->hold_active = UNTIL_STOP; 5481 5482 error = -ENOMEM; 5483 mddev->queue = blk_alloc_queue(GFP_KERNEL); 5484 if (!mddev->queue) 5485 goto abort; 5486 mddev->queue->queuedata = mddev; 5487 5488 blk_queue_make_request(mddev->queue, md_make_request); 5489 blk_set_stacking_limits(&mddev->queue->limits); 5490 5491 disk = alloc_disk(1 << shift); 5492 if (!disk) { 5493 blk_cleanup_queue(mddev->queue); 5494 mddev->queue = NULL; 5495 goto abort; 5496 } 5497 disk->major = MAJOR(mddev->unit); 5498 disk->first_minor = unit << shift; 5499 if (name) 5500 strcpy(disk->disk_name, name); 5501 else if (partitioned) 5502 sprintf(disk->disk_name, "md_d%d", unit); 5503 else 5504 sprintf(disk->disk_name, "md%d", unit); 5505 disk->fops = &md_fops; 5506 disk->private_data = mddev; 5507 disk->queue = mddev->queue; 5508 blk_queue_write_cache(mddev->queue, true, true); 5509 /* Allow extended partitions. This makes the 5510 * 'mdp' device redundant, but we can't really 5511 * remove it now. 5512 */ 5513 disk->flags |= GENHD_FL_EXT_DEVT; 5514 mddev->gendisk = disk; 5515 /* As soon as we call add_disk(), another thread could get 5516 * through to md_open, so make sure it doesn't get too far 5517 */ 5518 mutex_lock(&mddev->open_mutex); 5519 add_disk(disk); 5520 5521 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); 5522 if (error) { 5523 /* This isn't possible, but as kobject_init_and_add is marked 5524 * __must_check, we must do something with the result 5525 */ 5526 pr_debug("md: cannot register %s/md - name in use\n", 5527 disk->disk_name); 5528 error = 0; 5529 } 5530 if (mddev->kobj.sd && 5531 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 5532 pr_debug("pointless warning\n"); 5533 mutex_unlock(&mddev->open_mutex); 5534 abort: 5535 mutex_unlock(&disks_mutex); 5536 if (!error && mddev->kobj.sd) { 5537 kobject_uevent(&mddev->kobj, KOBJ_ADD); 5538 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 5539 } 5540 mddev_put(mddev); 5541 return error; 5542 } 5543 5544 static struct kobject *md_probe(dev_t dev, int *part, void *data) 5545 { 5546 if (create_on_open) 5547 md_alloc(dev, NULL); 5548 return NULL; 5549 } 5550 5551 static int add_named_array(const char *val, const struct kernel_param *kp) 5552 { 5553 /* 5554 * val must be "md_*" or "mdNNN". 5555 * For "md_*" we allocate an array with a large free minor number, and 5556 * set the name to val. val must not already be an active name. 5557 * For "mdNNN" we allocate an array with the minor number NNN 5558 * which must not already be in use. 
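 *
 * Example usage (names illustrative):
 *   echo md_home > /sys/module/md_mod/parameters/new_array  -> creates md_home
 *   echo md12 > /sys/module/md_mod/parameters/new_array     -> creates /dev/md12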
5559 */
5560 int len = strlen(val);
5561 char buf[DISK_NAME_LEN];
5562 unsigned long devnum;
5563
5564 while (len && val[len-1] == '\n')
5565 len--;
5566 if (len >= DISK_NAME_LEN)
5567 return -E2BIG;
5568 strlcpy(buf, val, len+1);
5569 if (strncmp(buf, "md_", 3) == 0)
5570 return md_alloc(0, buf);
5571 if (strncmp(buf, "md", 2) == 0 &&
5572 isdigit(buf[2]) &&
5573 kstrtoul(buf+2, 10, &devnum) == 0 &&
5574 devnum <= MINORMASK)
5575 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5576
5577 return -EINVAL;
5578 }
5579
5580 static void md_safemode_timeout(struct timer_list *t)
5581 {
5582 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5583
5584 mddev->safemode = 1;
5585 if (mddev->external)
5586 sysfs_notify_dirent_safe(mddev->sysfs_state);
5587
5588 md_wakeup_thread(mddev->thread);
5589 }
5590
5591 static int start_dirty_degraded;
5592
5593 int md_run(struct mddev *mddev)
5594 {
5595 int err;
5596 struct md_rdev *rdev;
5597 struct md_personality *pers;
5598
5599 if (list_empty(&mddev->disks))
5600 /* cannot run an array with no devices. */
5601 return -EINVAL;
5602
5603 if (mddev->pers)
5604 return -EBUSY;
5605 /* Cannot run until previous stop completes properly */
5606 if (mddev->sysfs_active)
5607 return -EBUSY;
5608
5609 /*
5610 * Analyze all RAID superblock(s)
5611 */
5612 if (!mddev->raid_disks) {
5613 if (!mddev->persistent)
5614 return -EINVAL;
5615 err = analyze_sbs(mddev);
5616 if (err)
5617 return -EINVAL;
5618 }
5619
5620 if (mddev->level != LEVEL_NONE)
5621 request_module("md-level-%d", mddev->level);
5622 else if (mddev->clevel[0])
5623 request_module("md-%s", mddev->clevel);
5624
5625 /*
5626 * Drop all container device buffers, from now on
5627 * the only valid external interface is through the md
5628 * device.
5629 */
5630 mddev->has_superblocks = false;
5631 rdev_for_each(rdev, mddev) {
5632 if (test_bit(Faulty, &rdev->flags))
5633 continue;
5634 sync_blockdev(rdev->bdev);
5635 invalidate_bdev(rdev->bdev);
5636 if (mddev->ro != 1 &&
5637 (bdev_read_only(rdev->bdev) ||
5638 bdev_read_only(rdev->meta_bdev))) {
5639 mddev->ro = 1;
5640 if (mddev->gendisk)
5641 set_disk_ro(mddev->gendisk, 1);
5642 }
5643
5644 if (rdev->sb_page)
5645 mddev->has_superblocks = true;
5646
5647 /* perform some consistency tests on the device.
5648 * We don't want the data to overlap the metadata;
5649 * Internal Bitmap issues have been handled elsewhere.
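 * (A 0.90/1.0-style superblock lives at the end of the device, so
 * data_offset < sb_start and we check that the data does not run into
 * it; a 1.1/1.2-style superblock lives at the start, so the opposite
 * ordering is checked instead.)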
5650 */ 5651 if (rdev->meta_bdev) { 5652 /* Nothing to check */; 5653 } else if (rdev->data_offset < rdev->sb_start) { 5654 if (mddev->dev_sectors && 5655 rdev->data_offset + mddev->dev_sectors 5656 > rdev->sb_start) { 5657 pr_warn("md: %s: data overlaps metadata\n", 5658 mdname(mddev)); 5659 return -EINVAL; 5660 } 5661 } else { 5662 if (rdev->sb_start + rdev->sb_size/512 5663 > rdev->data_offset) { 5664 pr_warn("md: %s: metadata overlaps data\n", 5665 mdname(mddev)); 5666 return -EINVAL; 5667 } 5668 } 5669 sysfs_notify_dirent_safe(rdev->sysfs_state); 5670 } 5671 5672 if (!bioset_initialized(&mddev->bio_set)) { 5673 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5674 if (err) 5675 return err; 5676 } 5677 if (!bioset_initialized(&mddev->sync_set)) { 5678 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5679 if (err) 5680 return err; 5681 } 5682 5683 spin_lock(&pers_lock); 5684 pers = find_pers(mddev->level, mddev->clevel); 5685 if (!pers || !try_module_get(pers->owner)) { 5686 spin_unlock(&pers_lock); 5687 if (mddev->level != LEVEL_NONE) 5688 pr_warn("md: personality for level %d is not loaded!\n", 5689 mddev->level); 5690 else 5691 pr_warn("md: personality for level %s is not loaded!\n", 5692 mddev->clevel); 5693 err = -EINVAL; 5694 goto abort; 5695 } 5696 spin_unlock(&pers_lock); 5697 if (mddev->level != pers->level) { 5698 mddev->level = pers->level; 5699 mddev->new_level = pers->level; 5700 } 5701 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5702 5703 if (mddev->reshape_position != MaxSector && 5704 pers->start_reshape == NULL) { 5705 /* This personality cannot handle reshaping... */ 5706 module_put(pers->owner); 5707 err = -EINVAL; 5708 goto abort; 5709 } 5710 5711 if (pers->sync_request) { 5712 /* Warn if this is a potentially silly 5713 * configuration. 
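 * e.g. two members of the same array placed on partitions of one
 * physical disk (detected below by comparing bd_contains), which would
 * defeat the redundancy the level is supposed to provide.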
5714 */
5715 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5716 struct md_rdev *rdev2;
5717 int warned = 0;
5718
5719 rdev_for_each(rdev, mddev)
5720 rdev_for_each(rdev2, mddev) {
5721 if (rdev < rdev2 &&
5722 rdev->bdev->bd_contains ==
5723 rdev2->bdev->bd_contains) {
5724 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5725 mdname(mddev),
5726 bdevname(rdev->bdev,b),
5727 bdevname(rdev2->bdev,b2));
5728 warned = 1;
5729 }
5730 }
5731
5732 if (warned)
5733 pr_warn("True protection against single-disk failure might be compromised.\n");
5734 }
5735
5736 mddev->recovery = 0;
5737 /* may be over-ridden by personality */
5738 mddev->resync_max_sectors = mddev->dev_sectors;
5739
5740 mddev->ok_start_degraded = start_dirty_degraded;
5741
5742 if (start_readonly && mddev->ro == 0)
5743 mddev->ro = 2; /* read-only, but switch on first write */
5744
5745 err = pers->run(mddev);
5746 if (err)
5747 pr_warn("md: pers->run() failed ...\n");
5748 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5749 WARN_ONCE(!mddev->external_size,
5750 "%s: default size too small, but 'external_size' not in effect?\n",
5751 __func__);
5752 pr_warn("md: invalid array_size %llu > default size %llu\n",
5753 (unsigned long long)mddev->array_sectors / 2,
5754 (unsigned long long)pers->size(mddev, 0, 0) / 2);
5755 err = -EINVAL;
5756 }
5757 if (err == 0 && pers->sync_request &&
5758 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5759 struct bitmap *bitmap;
5760
5761 bitmap = md_bitmap_create(mddev, -1);
5762 if (IS_ERR(bitmap)) {
5763 err = PTR_ERR(bitmap);
5764 pr_warn("%s: failed to create bitmap (%d)\n",
5765 mdname(mddev), err);
5766 } else
5767 mddev->bitmap = bitmap;
5768
5769 }
5770 if (err)
5771 goto bitmap_abort;
5772
5773 if (mddev->bitmap_info.max_write_behind > 0) {
5774 bool create_pool = false;
5775
5776 rdev_for_each(rdev, mddev) {
5777 if (test_bit(WriteMostly, &rdev->flags) &&
5778 rdev_init_serial(rdev))
5779 create_pool = true;
5780 }
5781 if (create_pool && mddev->serial_info_pool == NULL) {
5782 mddev->serial_info_pool =
5783 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
5784 sizeof(struct serial_info));
5785 if (!mddev->serial_info_pool) {
5786 err = -ENOMEM;
5787 goto bitmap_abort;
5788 }
5789 }
5790 }
5791
5792 if (mddev->queue) {
5793 bool nonrot = true;
5794
5795 rdev_for_each(rdev, mddev) {
5796 if (rdev->raid_disk >= 0 &&
5797 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5798 nonrot = false;
5799 break;
5800 }
5801 }
5802 if (mddev->degraded)
5803 nonrot = false;
5804 if (nonrot)
5805 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
5806 else
5807 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
5808 mddev->queue->backing_dev_info->congested_data = mddev;
5809 mddev->queue->backing_dev_info->congested_fn = md_congested;
5810 }
5811 if (pers->sync_request) {
5812 if (mddev->kobj.sd &&
5813 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5814 pr_warn("md: cannot register extra attributes for %s\n",
5815 mdname(mddev));
5816 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5817 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5818 mddev->ro = 0;
5819
5820 atomic_set(&mddev->max_corr_read_errors,
5821 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5822 mddev->safemode = 0;
5823 if (mddev_is_clustered(mddev))
5824 mddev->safemode_delay = 0;
5825 else
5826 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5827 mddev->in_sync = 1;
5828 smp_wmb();
5829 spin_lock(&mddev->lock);
5830 mddev->pers =
pers; 5831 spin_unlock(&mddev->lock); 5832 rdev_for_each(rdev, mddev) 5833 if (rdev->raid_disk >= 0) 5834 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ 5835 5836 if (mddev->degraded && !mddev->ro) 5837 /* This ensures that recovering status is reported immediately 5838 * via sysfs - until a lack of spares is confirmed. 5839 */ 5840 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5841 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5842 5843 if (mddev->sb_flags) 5844 md_update_sb(mddev, 0); 5845 5846 md_new_event(mddev); 5847 return 0; 5848 5849 bitmap_abort: 5850 mddev_detach(mddev); 5851 if (mddev->private) 5852 pers->free(mddev, mddev->private); 5853 mddev->private = NULL; 5854 module_put(pers->owner); 5855 md_bitmap_destroy(mddev); 5856 abort: 5857 bioset_exit(&mddev->bio_set); 5858 bioset_exit(&mddev->sync_set); 5859 return err; 5860 } 5861 EXPORT_SYMBOL_GPL(md_run); 5862 5863 static int do_md_run(struct mddev *mddev) 5864 { 5865 int err; 5866 5867 set_bit(MD_NOT_READY, &mddev->flags); 5868 err = md_run(mddev); 5869 if (err) 5870 goto out; 5871 err = md_bitmap_load(mddev); 5872 if (err) { 5873 md_bitmap_destroy(mddev); 5874 goto out; 5875 } 5876 5877 if (mddev_is_clustered(mddev)) 5878 md_allow_write(mddev); 5879 5880 /* run start up tasks that require md_thread */ 5881 md_start(mddev); 5882 5883 md_wakeup_thread(mddev->thread); 5884 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 5885 5886 set_capacity(mddev->gendisk, mddev->array_sectors); 5887 revalidate_disk(mddev->gendisk); 5888 clear_bit(MD_NOT_READY, &mddev->flags); 5889 mddev->changed = 1; 5890 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5891 sysfs_notify_dirent_safe(mddev->sysfs_state); 5892 sysfs_notify_dirent_safe(mddev->sysfs_action); 5893 sysfs_notify(&mddev->kobj, NULL, "degraded"); 5894 out: 5895 clear_bit(MD_NOT_READY, &mddev->flags); 5896 return err; 5897 } 5898 5899 int md_start(struct mddev *mddev) 5900 { 5901 int ret = 0; 5902 5903 if (mddev->pers->start) { 5904 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 5905 md_wakeup_thread(mddev->thread); 5906 ret = mddev->pers->start(mddev); 5907 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 5908 md_wakeup_thread(mddev->sync_thread); 5909 } 5910 return ret; 5911 } 5912 EXPORT_SYMBOL_GPL(md_start); 5913 5914 static int restart_array(struct mddev *mddev) 5915 { 5916 struct gendisk *disk = mddev->gendisk; 5917 struct md_rdev *rdev; 5918 bool has_journal = false; 5919 bool has_readonly = false; 5920 5921 /* Complain if it has no devices */ 5922 if (list_empty(&mddev->disks)) 5923 return -ENXIO; 5924 if (!mddev->pers) 5925 return -EINVAL; 5926 if (!mddev->ro) 5927 return -EBUSY; 5928 5929 rcu_read_lock(); 5930 rdev_for_each_rcu(rdev, mddev) { 5931 if (test_bit(Journal, &rdev->flags) && 5932 !test_bit(Faulty, &rdev->flags)) 5933 has_journal = true; 5934 if (bdev_read_only(rdev->bdev)) 5935 has_readonly = true; 5936 } 5937 rcu_read_unlock(); 5938 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) 5939 /* Don't restart rw with journal missing/faulty */ 5940 return -EINVAL; 5941 if (has_readonly) 5942 return -EROFS; 5943 5944 mddev->safemode = 0; 5945 mddev->ro = 0; 5946 set_disk_ro(disk, 0); 5947 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 5948 /* Kick recovery or resync if necessary */ 5949 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5950 md_wakeup_thread(mddev->thread); 5951 md_wakeup_thread(mddev->sync_thread); 5952 sysfs_notify_dirent_safe(mddev->sysfs_state); 5953 return 0; 5954 } 5955 5956 
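/*
 * Editorial aside: restart_array() above is reached through the
 * RESTART_ARRAY_RW ioctl handled later in md_ioctl().  A minimal
 * userspace sketch (illustrative only; assumes /dev/md0 exists and the
 * caller has CAP_SYS_ADMIN):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, RESTART_ARRAY_RW, 0) == 0)
 *		;	// array switched from read-only back to read-write
 *
 * The call fails with -EBUSY unless the array is currently read-only,
 * and with -EROFS if any member device is itself read-only, matching
 * the checks in restart_array() above.
 */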
static void md_clean(struct mddev *mddev) 5957 { 5958 mddev->array_sectors = 0; 5959 mddev->external_size = 0; 5960 mddev->dev_sectors = 0; 5961 mddev->raid_disks = 0; 5962 mddev->recovery_cp = 0; 5963 mddev->resync_min = 0; 5964 mddev->resync_max = MaxSector; 5965 mddev->reshape_position = MaxSector; 5966 mddev->external = 0; 5967 mddev->persistent = 0; 5968 mddev->level = LEVEL_NONE; 5969 mddev->clevel[0] = 0; 5970 mddev->flags = 0; 5971 mddev->sb_flags = 0; 5972 mddev->ro = 0; 5973 mddev->metadata_type[0] = 0; 5974 mddev->chunk_sectors = 0; 5975 mddev->ctime = mddev->utime = 0; 5976 mddev->layout = 0; 5977 mddev->max_disks = 0; 5978 mddev->events = 0; 5979 mddev->can_decrease_events = 0; 5980 mddev->delta_disks = 0; 5981 mddev->reshape_backwards = 0; 5982 mddev->new_level = LEVEL_NONE; 5983 mddev->new_layout = 0; 5984 mddev->new_chunk_sectors = 0; 5985 mddev->curr_resync = 0; 5986 atomic64_set(&mddev->resync_mismatches, 0); 5987 mddev->suspend_lo = mddev->suspend_hi = 0; 5988 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5989 mddev->recovery = 0; 5990 mddev->in_sync = 0; 5991 mddev->changed = 0; 5992 mddev->degraded = 0; 5993 mddev->safemode = 0; 5994 mddev->private = NULL; 5995 mddev->cluster_info = NULL; 5996 mddev->bitmap_info.offset = 0; 5997 mddev->bitmap_info.default_offset = 0; 5998 mddev->bitmap_info.default_space = 0; 5999 mddev->bitmap_info.chunksize = 0; 6000 mddev->bitmap_info.daemon_sleep = 0; 6001 mddev->bitmap_info.max_write_behind = 0; 6002 mddev->bitmap_info.nodes = 0; 6003 } 6004 6005 static void __md_stop_writes(struct mddev *mddev) 6006 { 6007 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6008 flush_workqueue(md_misc_wq); 6009 if (mddev->sync_thread) { 6010 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6011 md_reap_sync_thread(mddev); 6012 } 6013 6014 del_timer_sync(&mddev->safemode_timer); 6015 6016 if (mddev->pers && mddev->pers->quiesce) { 6017 mddev->pers->quiesce(mddev, 1); 6018 mddev->pers->quiesce(mddev, 0); 6019 } 6020 md_bitmap_flush(mddev); 6021 6022 if (mddev->ro == 0 && 6023 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 6024 mddev->sb_flags)) { 6025 /* mark array as shutdown cleanly */ 6026 if (!mddev_is_clustered(mddev)) 6027 mddev->in_sync = 1; 6028 md_update_sb(mddev, 1); 6029 } 6030 mempool_destroy(mddev->serial_info_pool); 6031 mddev->serial_info_pool = NULL; 6032 } 6033 6034 void md_stop_writes(struct mddev *mddev) 6035 { 6036 mddev_lock_nointr(mddev); 6037 __md_stop_writes(mddev); 6038 mddev_unlock(mddev); 6039 } 6040 EXPORT_SYMBOL_GPL(md_stop_writes); 6041 6042 static void mddev_detach(struct mddev *mddev) 6043 { 6044 md_bitmap_wait_behind_writes(mddev); 6045 if (mddev->pers && mddev->pers->quiesce) { 6046 mddev->pers->quiesce(mddev, 1); 6047 mddev->pers->quiesce(mddev, 0); 6048 } 6049 md_unregister_thread(&mddev->thread); 6050 if (mddev->queue) 6051 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 6052 } 6053 6054 static void __md_stop(struct mddev *mddev) 6055 { 6056 struct md_personality *pers = mddev->pers; 6057 md_bitmap_destroy(mddev); 6058 mddev_detach(mddev); 6059 /* Ensure ->event_work is done */ 6060 flush_workqueue(md_misc_wq); 6061 spin_lock(&mddev->lock); 6062 mddev->pers = NULL; 6063 spin_unlock(&mddev->lock); 6064 pers->free(mddev, mddev->private); 6065 mddev->private = NULL; 6066 if (pers->sync_request && mddev->to_remove == NULL) 6067 mddev->to_remove = &md_redundancy_group; 6068 module_put(pers->owner); 6069 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6070 } 6071 6072 void md_stop(struct mddev 
*mddev) 6073 { 6074 /* stop the array and free any attached data structures. 6075 * This is called from dm-raid 6076 */ 6077 __md_stop(mddev); 6078 bioset_exit(&mddev->bio_set); 6079 bioset_exit(&mddev->sync_set); 6080 } 6081 6082 EXPORT_SYMBOL_GPL(md_stop); 6083 6084 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 6085 { 6086 int err = 0; 6087 int did_freeze = 0; 6088 6089 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6090 did_freeze = 1; 6091 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6092 md_wakeup_thread(mddev->thread); 6093 } 6094 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6095 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6096 if (mddev->sync_thread) 6097 /* Thread might be blocked waiting for metadata update 6098 * which will now never happen */ 6099 wake_up_process(mddev->sync_thread->tsk); 6100 6101 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 6102 return -EBUSY; 6103 mddev_unlock(mddev); 6104 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 6105 &mddev->recovery)); 6106 wait_event(mddev->sb_wait, 6107 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 6108 mddev_lock_nointr(mddev); 6109 6110 mutex_lock(&mddev->open_mutex); 6111 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6112 mddev->sync_thread || 6113 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6114 pr_warn("md: %s still in use.\n",mdname(mddev)); 6115 if (did_freeze) { 6116 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6117 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6118 md_wakeup_thread(mddev->thread); 6119 } 6120 err = -EBUSY; 6121 goto out; 6122 } 6123 if (mddev->pers) { 6124 __md_stop_writes(mddev); 6125 6126 err = -ENXIO; 6127 if (mddev->ro==1) 6128 goto out; 6129 mddev->ro = 1; 6130 set_disk_ro(mddev->gendisk, 1); 6131 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6132 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6133 md_wakeup_thread(mddev->thread); 6134 sysfs_notify_dirent_safe(mddev->sysfs_state); 6135 err = 0; 6136 } 6137 out: 6138 mutex_unlock(&mddev->open_mutex); 6139 return err; 6140 } 6141 6142 /* mode: 6143 * 0 - completely stop and disassemble array 6144 * 2 - stop but do not disassemble array 6145 */ 6146 static int do_md_stop(struct mddev *mddev, int mode, 6147 struct block_device *bdev) 6148 { 6149 struct gendisk *disk = mddev->gendisk; 6150 struct md_rdev *rdev; 6151 int did_freeze = 0; 6152 6153 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6154 did_freeze = 1; 6155 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6156 md_wakeup_thread(mddev->thread); 6157 } 6158 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6159 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6160 if (mddev->sync_thread) 6161 /* Thread might be blocked waiting for metadata update 6162 * which will now never happen */ 6163 wake_up_process(mddev->sync_thread->tsk); 6164 6165 mddev_unlock(mddev); 6166 wait_event(resync_wait, (mddev->sync_thread == NULL && 6167 !test_bit(MD_RECOVERY_RUNNING, 6168 &mddev->recovery))); 6169 mddev_lock_nointr(mddev); 6170 6171 mutex_lock(&mddev->open_mutex); 6172 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6173 mddev->sysfs_active || 6174 mddev->sync_thread || 6175 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6176 pr_warn("md: %s still in use.\n",mdname(mddev)); 6177 mutex_unlock(&mddev->open_mutex); 6178 if (did_freeze) { 6179 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6180 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6181
md_wakeup_thread(mddev->thread); 6182 } 6183 return -EBUSY; 6184 } 6185 if (mddev->pers) { 6186 if (mddev->ro) 6187 set_disk_ro(disk, 0); 6188 6189 __md_stop_writes(mddev); 6190 __md_stop(mddev); 6191 mddev->queue->backing_dev_info->congested_fn = NULL; 6192 6193 /* tell userspace to handle 'inactive' */ 6194 sysfs_notify_dirent_safe(mddev->sysfs_state); 6195 6196 rdev_for_each(rdev, mddev) 6197 if (rdev->raid_disk >= 0) 6198 sysfs_unlink_rdev(mddev, rdev); 6199 6200 set_capacity(disk, 0); 6201 mutex_unlock(&mddev->open_mutex); 6202 mddev->changed = 1; 6203 revalidate_disk(disk); 6204 6205 if (mddev->ro) 6206 mddev->ro = 0; 6207 } else 6208 mutex_unlock(&mddev->open_mutex); 6209 /* 6210 * Free resources if final stop 6211 */ 6212 if (mode == 0) { 6213 pr_info("md: %s stopped.\n", mdname(mddev)); 6214 6215 if (mddev->bitmap_info.file) { 6216 struct file *f = mddev->bitmap_info.file; 6217 spin_lock(&mddev->lock); 6218 mddev->bitmap_info.file = NULL; 6219 spin_unlock(&mddev->lock); 6220 fput(f); 6221 } 6222 mddev->bitmap_info.offset = 0; 6223 6224 export_array(mddev); 6225 6226 md_clean(mddev); 6227 if (mddev->hold_active == UNTIL_STOP) 6228 mddev->hold_active = 0; 6229 } 6230 md_new_event(mddev); 6231 sysfs_notify_dirent_safe(mddev->sysfs_state); 6232 return 0; 6233 } 6234 6235 #ifndef MODULE 6236 static void autorun_array(struct mddev *mddev) 6237 { 6238 struct md_rdev *rdev; 6239 int err; 6240 6241 if (list_empty(&mddev->disks)) 6242 return; 6243 6244 pr_info("md: running: "); 6245 6246 rdev_for_each(rdev, mddev) { 6247 char b[BDEVNAME_SIZE]; 6248 pr_cont("<%s>", bdevname(rdev->bdev,b)); 6249 } 6250 pr_cont("\n"); 6251 6252 err = do_md_run(mddev); 6253 if (err) { 6254 pr_warn("md: do_md_run() returned %d\n", err); 6255 do_md_stop(mddev, 0, NULL); 6256 } 6257 } 6258 6259 /* 6260 * let's try to run arrays based on all disks that have arrived 6261 * until now. (those are in pending_raid_disks) 6262 * 6263 * the method: pick the first pending disk, collect all disks with 6264 * the same UUID, remove all from the pending list and put them into 6265 * the 'same_array' list. Then order this list based on superblock 6266 * update time (freshest comes first), kick out 'old' disks and 6267 * compare superblocks. If everything's fine then run it. 6268 * 6269 * If "unit" is allocated, then bump its reference count 6270 */ 6271 static void autorun_devices(int part) 6272 { 6273 struct md_rdev *rdev0, *rdev, *tmp; 6274 struct mddev *mddev; 6275 char b[BDEVNAME_SIZE]; 6276 6277 pr_info("md: autorun ...\n"); 6278 while (!list_empty(&pending_raid_disks)) { 6279 int unit; 6280 dev_t dev; 6281 LIST_HEAD(candidates); 6282 rdev0 = list_entry(pending_raid_disks.next, 6283 struct md_rdev, same_set); 6284 6285 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b)); 6286 INIT_LIST_HEAD(&candidates); 6287 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 6288 if (super_90_load(rdev, rdev0, 0) >= 0) { 6289 pr_debug("md: adding %s ...\n", 6290 bdevname(rdev->bdev,b)); 6291 list_move(&rdev->same_set, &candidates); 6292 } 6293 /* 6294 * now we have a set of devices, with all of them having 6295 * mostly sane superblocks. It's time to allocate the 6296 * mddev.
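 * (Example: three partitions whose 0.90 superblocks match rdev0's UUID
 * are moved onto 'candidates' above; disks with a different UUID stay
 * on pending_raid_disks and are picked up by a later iteration of the
 * enclosing while loop.)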
6297 */ 6298 if (part) { 6299 dev = MKDEV(mdp_major, 6300 rdev0->preferred_minor << MdpMinorShift); 6301 unit = MINOR(dev) >> MdpMinorShift; 6302 } else { 6303 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 6304 unit = MINOR(dev); 6305 } 6306 if (rdev0->preferred_minor != unit) { 6307 pr_warn("md: unit number in %s is bad: %d\n", 6308 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 6309 break; 6310 } 6311 6312 md_probe(dev, NULL, NULL); 6313 mddev = mddev_find(dev); 6314 if (!mddev || !mddev->gendisk) { 6315 if (mddev) 6316 mddev_put(mddev); 6317 break; 6318 } 6319 if (mddev_lock(mddev)) 6320 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 6321 else if (mddev->raid_disks || mddev->major_version 6322 || !list_empty(&mddev->disks)) { 6323 pr_warn("md: %s already running, cannot run %s\n", 6324 mdname(mddev), bdevname(rdev0->bdev,b)); 6325 mddev_unlock(mddev); 6326 } else { 6327 pr_debug("md: created %s\n", mdname(mddev)); 6328 mddev->persistent = 1; 6329 rdev_for_each_list(rdev, tmp, &candidates) { 6330 list_del_init(&rdev->same_set); 6331 if (bind_rdev_to_array(rdev, mddev)) 6332 export_rdev(rdev); 6333 } 6334 autorun_array(mddev); 6335 mddev_unlock(mddev); 6336 } 6337 /* on success, candidates will be empty, on error 6338 * it won't... 6339 */ 6340 rdev_for_each_list(rdev, tmp, &candidates) { 6341 list_del_init(&rdev->same_set); 6342 export_rdev(rdev); 6343 } 6344 mddev_put(mddev); 6345 } 6346 pr_info("md: ... autorun DONE.\n"); 6347 } 6348 #endif /* !MODULE */ 6349 6350 static int get_version(void __user *arg) 6351 { 6352 mdu_version_t ver; 6353 6354 ver.major = MD_MAJOR_VERSION; 6355 ver.minor = MD_MINOR_VERSION; 6356 ver.patchlevel = MD_PATCHLEVEL_VERSION; 6357 6358 if (copy_to_user(arg, &ver, sizeof(ver))) 6359 return -EFAULT; 6360 6361 return 0; 6362 } 6363 6364 static int get_array_info(struct mddev *mddev, void __user *arg) 6365 { 6366 mdu_array_info_t info; 6367 int nr,working,insync,failed,spare; 6368 struct md_rdev *rdev; 6369 6370 nr = working = insync = failed = spare = 0; 6371 rcu_read_lock(); 6372 rdev_for_each_rcu(rdev, mddev) { 6373 nr++; 6374 if (test_bit(Faulty, &rdev->flags)) 6375 failed++; 6376 else { 6377 working++; 6378 if (test_bit(In_sync, &rdev->flags)) 6379 insync++; 6380 else if (test_bit(Journal, &rdev->flags)) 6381 /* TODO: add journal count to md_u.h */ 6382 ; 6383 else 6384 spare++; 6385 } 6386 } 6387 rcu_read_unlock(); 6388 6389 info.major_version = mddev->major_version; 6390 info.minor_version = mddev->minor_version; 6391 info.patch_version = MD_PATCHLEVEL_VERSION; 6392 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 6393 info.level = mddev->level; 6394 info.size = mddev->dev_sectors / 2; 6395 if (info.size != mddev->dev_sectors / 2) /* overflow */ 6396 info.size = -1; 6397 info.nr_disks = nr; 6398 info.raid_disks = mddev->raid_disks; 6399 info.md_minor = mddev->md_minor; 6400 info.not_persistent= !mddev->persistent; 6401 6402 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 6403 info.state = 0; 6404 if (mddev->in_sync) 6405 info.state = (1<<MD_SB_CLEAN); 6406 if (mddev->bitmap && mddev->bitmap_info.offset) 6407 info.state |= (1<<MD_SB_BITMAP_PRESENT); 6408 if (mddev_is_clustered(mddev)) 6409 info.state |= (1<<MD_SB_CLUSTERED); 6410 info.active_disks = insync; 6411 info.working_disks = working; 6412 info.failed_disks = failed; 6413 info.spare_disks = spare; 6414 6415 info.layout = mddev->layout; 6416 info.chunk_size = mddev->chunk_sectors << 9; 6417 6418 if (copy_to_user(arg, &info, sizeof(info))) 6419 return -EFAULT; 6420 6421 
return 0; 6422 } 6423 6424 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 6425 { 6426 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 6427 char *ptr; 6428 int err; 6429 6430 file = kzalloc(sizeof(*file), GFP_NOIO); 6431 if (!file) 6432 return -ENOMEM; 6433 6434 err = 0; 6435 spin_lock(&mddev->lock); 6436 /* bitmap enabled */ 6437 if (mddev->bitmap_info.file) { 6438 ptr = file_path(mddev->bitmap_info.file, file->pathname, 6439 sizeof(file->pathname)); 6440 if (IS_ERR(ptr)) 6441 err = PTR_ERR(ptr); 6442 else 6443 memmove(file->pathname, ptr, 6444 sizeof(file->pathname)-(ptr-file->pathname)); 6445 } 6446 spin_unlock(&mddev->lock); 6447 6448 if (err == 0 && 6449 copy_to_user(arg, file, sizeof(*file))) 6450 err = -EFAULT; 6451 6452 kfree(file); 6453 return err; 6454 } 6455 6456 static int get_disk_info(struct mddev *mddev, void __user * arg) 6457 { 6458 mdu_disk_info_t info; 6459 struct md_rdev *rdev; 6460 6461 if (copy_from_user(&info, arg, sizeof(info))) 6462 return -EFAULT; 6463 6464 rcu_read_lock(); 6465 rdev = md_find_rdev_nr_rcu(mddev, info.number); 6466 if (rdev) { 6467 info.major = MAJOR(rdev->bdev->bd_dev); 6468 info.minor = MINOR(rdev->bdev->bd_dev); 6469 info.raid_disk = rdev->raid_disk; 6470 info.state = 0; 6471 if (test_bit(Faulty, &rdev->flags)) 6472 info.state |= (1<<MD_DISK_FAULTY); 6473 else if (test_bit(In_sync, &rdev->flags)) { 6474 info.state |= (1<<MD_DISK_ACTIVE); 6475 info.state |= (1<<MD_DISK_SYNC); 6476 } 6477 if (test_bit(Journal, &rdev->flags)) 6478 info.state |= (1<<MD_DISK_JOURNAL); 6479 if (test_bit(WriteMostly, &rdev->flags)) 6480 info.state |= (1<<MD_DISK_WRITEMOSTLY); 6481 if (test_bit(FailFast, &rdev->flags)) 6482 info.state |= (1<<MD_DISK_FAILFAST); 6483 } else { 6484 info.major = info.minor = 0; 6485 info.raid_disk = -1; 6486 info.state = (1<<MD_DISK_REMOVED); 6487 } 6488 rcu_read_unlock(); 6489 6490 if (copy_to_user(arg, &info, sizeof(info))) 6491 return -EFAULT; 6492 6493 return 0; 6494 } 6495 6496 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) 6497 { 6498 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 6499 struct md_rdev *rdev; 6500 dev_t dev = MKDEV(info->major,info->minor); 6501 6502 if (mddev_is_clustered(mddev) && 6503 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 6504 pr_warn("%s: Cannot add to clustered mddev.\n", 6505 mdname(mddev)); 6506 return -EINVAL; 6507 } 6508 6509 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 6510 return -EOVERFLOW; 6511 6512 if (!mddev->raid_disks) { 6513 int err; 6514 /* expecting a device which has a superblock */ 6515 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 6516 if (IS_ERR(rdev)) { 6517 pr_warn("md: md_import_device returned %ld\n", 6518 PTR_ERR(rdev)); 6519 return PTR_ERR(rdev); 6520 } 6521 if (!list_empty(&mddev->disks)) { 6522 struct md_rdev *rdev0 6523 = list_entry(mddev->disks.next, 6524 struct md_rdev, same_set); 6525 err = super_types[mddev->major_version] 6526 .load_super(rdev, rdev0, mddev->minor_version); 6527 if (err < 0) { 6528 pr_warn("md: %s has different UUID to %s\n", 6529 bdevname(rdev->bdev,b), 6530 bdevname(rdev0->bdev,b2)); 6531 export_rdev(rdev); 6532 return -EINVAL; 6533 } 6534 } 6535 err = bind_rdev_to_array(rdev, mddev); 6536 if (err) 6537 export_rdev(rdev); 6538 return err; 6539 } 6540 6541 /* 6542 * add_new_disk can be used once the array is assembled 6543 * to add "hot spares". 
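 * (This is, for instance, the path taken when userspace issues the
 * ADD_NEW_DISK ioctl against a running array, as "mdadm --add" does.)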
They must already have a superblock 6544 * written 6545 */ 6546 if (mddev->pers) { 6547 int err; 6548 if (!mddev->pers->hot_add_disk) { 6549 pr_warn("%s: personality does not support diskops!\n", 6550 mdname(mddev)); 6551 return -EINVAL; 6552 } 6553 if (mddev->persistent) 6554 rdev = md_import_device(dev, mddev->major_version, 6555 mddev->minor_version); 6556 else 6557 rdev = md_import_device(dev, -1, -1); 6558 if (IS_ERR(rdev)) { 6559 pr_warn("md: md_import_device returned %ld\n", 6560 PTR_ERR(rdev)); 6561 return PTR_ERR(rdev); 6562 } 6563 /* set saved_raid_disk if appropriate */ 6564 if (!mddev->persistent) { 6565 if (info->state & (1<<MD_DISK_SYNC) && 6566 info->raid_disk < mddev->raid_disks) { 6567 rdev->raid_disk = info->raid_disk; 6568 set_bit(In_sync, &rdev->flags); 6569 clear_bit(Bitmap_sync, &rdev->flags); 6570 } else 6571 rdev->raid_disk = -1; 6572 rdev->saved_raid_disk = rdev->raid_disk; 6573 } else 6574 super_types[mddev->major_version]. 6575 validate_super(mddev, rdev); 6576 if ((info->state & (1<<MD_DISK_SYNC)) && 6577 rdev->raid_disk != info->raid_disk) { 6578 /* This was a hot-add request, but the events don't 6579 * match, so reject it. 6580 */ 6581 export_rdev(rdev); 6582 return -EINVAL; 6583 } 6584 6585 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 6586 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6587 set_bit(WriteMostly, &rdev->flags); 6588 else 6589 clear_bit(WriteMostly, &rdev->flags); 6590 if (info->state & (1<<MD_DISK_FAILFAST)) 6591 set_bit(FailFast, &rdev->flags); 6592 else 6593 clear_bit(FailFast, &rdev->flags); 6594 6595 if (info->state & (1<<MD_DISK_JOURNAL)) { 6596 struct md_rdev *rdev2; 6597 bool has_journal = false; 6598 6599 /* make sure no existing journal disk */ 6600 rdev_for_each(rdev2, mddev) { 6601 if (test_bit(Journal, &rdev2->flags)) { 6602 has_journal = true; 6603 break; 6604 } 6605 } 6606 if (has_journal || mddev->bitmap) { 6607 export_rdev(rdev); 6608 return -EBUSY; 6609 } 6610 set_bit(Journal, &rdev->flags); 6611 } 6612 /* 6613 * check whether the device shows up in other nodes 6614 */ 6615 if (mddev_is_clustered(mddev)) { 6616 if (info->state & (1 << MD_DISK_CANDIDATE)) 6617 set_bit(Candidate, &rdev->flags); 6618 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 6619 /* --add initiated by this node */ 6620 err = md_cluster_ops->add_new_disk(mddev, rdev); 6621 if (err) { 6622 export_rdev(rdev); 6623 return err; 6624 } 6625 } 6626 } 6627 6628 rdev->raid_disk = -1; 6629 err = bind_rdev_to_array(rdev, mddev); 6630 6631 if (err) 6632 export_rdev(rdev); 6633 6634 if (mddev_is_clustered(mddev)) { 6635 if (info->state & (1 << MD_DISK_CANDIDATE)) { 6636 if (!err) { 6637 err = md_cluster_ops->new_disk_ack(mddev, 6638 err == 0); 6639 if (err) 6640 md_kick_rdev_from_array(rdev); 6641 } 6642 } else { 6643 if (err) 6644 md_cluster_ops->add_new_disk_cancel(mddev); 6645 else 6646 err = add_bound_rdev(rdev); 6647 } 6648 6649 } else if (!err) 6650 err = add_bound_rdev(rdev); 6651 6652 return err; 6653 } 6654 6655 /* otherwise, add_new_disk is only allowed 6656 * for major_version==0 superblocks 6657 */ 6658 if (mddev->major_version != 0) { 6659 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 6660 return -EINVAL; 6661 } 6662 6663 if (!(info->state & (1<<MD_DISK_FAULTY))) { 6664 int err; 6665 rdev = md_import_device(dev, -1, 0); 6666 if (IS_ERR(rdev)) { 6667 pr_warn("md: error, md_import_device() returned %ld\n", 6668 PTR_ERR(rdev)); 6669 return PTR_ERR(rdev); 6670 } 6671 rdev->desc_nr = info->number; 6672 if (info->raid_disk <
mddev->raid_disks) 6673 rdev->raid_disk = info->raid_disk; 6674 else 6675 rdev->raid_disk = -1; 6676 6677 if (rdev->raid_disk < mddev->raid_disks) 6678 if (info->state & (1<<MD_DISK_SYNC)) 6679 set_bit(In_sync, &rdev->flags); 6680 6681 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6682 set_bit(WriteMostly, &rdev->flags); 6683 if (info->state & (1<<MD_DISK_FAILFAST)) 6684 set_bit(FailFast, &rdev->flags); 6685 6686 if (!mddev->persistent) { 6687 pr_debug("md: nonpersistent superblock ...\n"); 6688 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6689 } else 6690 rdev->sb_start = calc_dev_sboffset(rdev); 6691 rdev->sectors = rdev->sb_start; 6692 6693 err = bind_rdev_to_array(rdev, mddev); 6694 if (err) { 6695 export_rdev(rdev); 6696 return err; 6697 } 6698 } 6699 6700 return 0; 6701 } 6702 6703 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6704 { 6705 char b[BDEVNAME_SIZE]; 6706 struct md_rdev *rdev; 6707 6708 if (!mddev->pers) 6709 return -ENODEV; 6710 6711 rdev = find_rdev(mddev, dev); 6712 if (!rdev) 6713 return -ENXIO; 6714 6715 if (rdev->raid_disk < 0) 6716 goto kick_rdev; 6717 6718 clear_bit(Blocked, &rdev->flags); 6719 remove_and_add_spares(mddev, rdev); 6720 6721 if (rdev->raid_disk >= 0) 6722 goto busy; 6723 6724 kick_rdev: 6725 if (mddev_is_clustered(mddev)) 6726 md_cluster_ops->remove_disk(mddev, rdev); 6727 6728 md_kick_rdev_from_array(rdev); 6729 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6730 if (mddev->thread) 6731 md_wakeup_thread(mddev->thread); 6732 else 6733 md_update_sb(mddev, 1); 6734 md_new_event(mddev); 6735 6736 return 0; 6737 busy: 6738 pr_debug("md: cannot remove active disk %s from %s ...\n", 6739 bdevname(rdev->bdev,b), mdname(mddev)); 6740 return -EBUSY; 6741 } 6742 6743 static int hot_add_disk(struct mddev *mddev, dev_t dev) 6744 { 6745 char b[BDEVNAME_SIZE]; 6746 int err; 6747 struct md_rdev *rdev; 6748 6749 if (!mddev->pers) 6750 return -ENODEV; 6751 6752 if (mddev->major_version != 0) { 6753 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 6754 mdname(mddev)); 6755 return -EINVAL; 6756 } 6757 if (!mddev->pers->hot_add_disk) { 6758 pr_warn("%s: personality does not support diskops!\n", 6759 mdname(mddev)); 6760 return -EINVAL; 6761 } 6762 6763 rdev = md_import_device(dev, -1, 0); 6764 if (IS_ERR(rdev)) { 6765 pr_warn("md: error, md_import_device() returned %ld\n", 6766 PTR_ERR(rdev)); 6767 return -EINVAL; 6768 } 6769 6770 if (mddev->persistent) 6771 rdev->sb_start = calc_dev_sboffset(rdev); 6772 else 6773 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6774 6775 rdev->sectors = rdev->sb_start; 6776 6777 if (test_bit(Faulty, &rdev->flags)) { 6778 pr_warn("md: cannot hot-add faulty %s disk to %s!\n", 6779 bdevname(rdev->bdev,b), mdname(mddev)); 6780 err = -EINVAL; 6781 goto abort_export; 6782 } 6783 6784 clear_bit(In_sync, &rdev->flags); 6785 rdev->desc_nr = -1; 6786 rdev->saved_raid_disk = -1; 6787 err = bind_rdev_to_array(rdev, mddev); 6788 if (err) 6789 goto abort_export; 6790 6791 /* 6792 * The rest had better be atomic, we can have disk failures 6793 * noticed in interrupt contexts ... 6794 */ 6795 6796 rdev->raid_disk = -1; 6797 6798 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6799 if (!mddev->thread) 6800 md_update_sb(mddev, 1); 6801 /* 6802 * Kick recovery, maybe this spare has to be added to the 6803 * array immediately.
6804 */ 6805 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6806 md_wakeup_thread(mddev->thread); 6807 md_new_event(mddev); 6808 return 0; 6809 6810 abort_export: 6811 export_rdev(rdev); 6812 return err; 6813 } 6814 6815 static int set_bitmap_file(struct mddev *mddev, int fd) 6816 { 6817 int err = 0; 6818 6819 if (mddev->pers) { 6820 if (!mddev->pers->quiesce || !mddev->thread) 6821 return -EBUSY; 6822 if (mddev->recovery || mddev->sync_thread) 6823 return -EBUSY; 6824 /* we should be able to change the bitmap.. */ 6825 } 6826 6827 if (fd >= 0) { 6828 struct inode *inode; 6829 struct file *f; 6830 6831 if (mddev->bitmap || mddev->bitmap_info.file) 6832 return -EEXIST; /* cannot add when bitmap is present */ 6833 f = fget(fd); 6834 6835 if (f == NULL) { 6836 pr_warn("%s: error: failed to get bitmap file\n", 6837 mdname(mddev)); 6838 return -EBADF; 6839 } 6840 6841 inode = f->f_mapping->host; 6842 if (!S_ISREG(inode->i_mode)) { 6843 pr_warn("%s: error: bitmap file must be a regular file\n", 6844 mdname(mddev)); 6845 err = -EBADF; 6846 } else if (!(f->f_mode & FMODE_WRITE)) { 6847 pr_warn("%s: error: bitmap file must be opened for write\n", 6848 mdname(mddev)); 6849 err = -EBADF; 6850 } else if (atomic_read(&inode->i_writecount) != 1) { 6851 pr_warn("%s: error: bitmap file is already in use\n", 6852 mdname(mddev)); 6853 err = -EBUSY; 6854 } 6855 if (err) { 6856 fput(f); 6857 return err; 6858 } 6859 mddev->bitmap_info.file = f; 6860 mddev->bitmap_info.offset = 0; /* file overrides offset */ 6861 } else if (mddev->bitmap == NULL) 6862 return -ENOENT; /* cannot remove what isn't there */ 6863 err = 0; 6864 if (mddev->pers) { 6865 if (fd >= 0) { 6866 struct bitmap *bitmap; 6867 6868 bitmap = md_bitmap_create(mddev, -1); 6869 mddev_suspend(mddev); 6870 if (!IS_ERR(bitmap)) { 6871 mddev->bitmap = bitmap; 6872 err = md_bitmap_load(mddev); 6873 } else 6874 err = PTR_ERR(bitmap); 6875 if (err) { 6876 md_bitmap_destroy(mddev); 6877 fd = -1; 6878 } 6879 mddev_resume(mddev); 6880 } else if (fd < 0) { 6881 mddev_suspend(mddev); 6882 md_bitmap_destroy(mddev); 6883 mddev_resume(mddev); 6884 } 6885 } 6886 if (fd < 0) { 6887 struct file *f = mddev->bitmap_info.file; 6888 if (f) { 6889 spin_lock(&mddev->lock); 6890 mddev->bitmap_info.file = NULL; 6891 spin_unlock(&mddev->lock); 6892 fput(f); 6893 } 6894 } 6895 6896 return err; 6897 } 6898 6899 /* 6900 * set_array_info is used in two different ways 6901 * The original usage is when creating a new array. 6902 * In this usage, raid_disks is > 0 and it together with 6903 * level, size, not_persistent, layout, chunksize determine the 6904 * shape of the array. 6905 * This will always create an array with a type-0.90.0 superblock. 6906 * The newer usage is when assembling an array. 6907 * In this case raid_disks will be 0, and the major_version field is 6908 * used to determine which style super-blocks are to be found on the devices. 6909 * The minor and patch _version numbers are also kept in case the 6910 * super_block handler wishes to interpret them. 6911 */ 6912 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) 6913 { 6914 6915 if (info->raid_disks == 0) { 6916 /* just setting version number for superblock loading */ 6917 if (info->major_version < 0 || 6918 info->major_version >= ARRAY_SIZE(super_types) || 6919 super_types[info->major_version].name == NULL) { 6920 /* maybe try to auto-load a module?
*/ 6921 pr_warn("md: superblock version %d not known\n", 6922 info->major_version); 6923 return -EINVAL; 6924 } 6925 mddev->major_version = info->major_version; 6926 mddev->minor_version = info->minor_version; 6927 mddev->patch_version = info->patch_version; 6928 mddev->persistent = !info->not_persistent; 6929 /* ensure mddev_put doesn't delete this now that there 6930 * is some minimal configuration. 6931 */ 6932 mddev->ctime = ktime_get_real_seconds(); 6933 return 0; 6934 } 6935 mddev->major_version = MD_MAJOR_VERSION; 6936 mddev->minor_version = MD_MINOR_VERSION; 6937 mddev->patch_version = MD_PATCHLEVEL_VERSION; 6938 mddev->ctime = ktime_get_real_seconds(); 6939 6940 mddev->level = info->level; 6941 mddev->clevel[0] = 0; 6942 mddev->dev_sectors = 2 * (sector_t)info->size; 6943 mddev->raid_disks = info->raid_disks; 6944 /* don't set md_minor, it is determined by which /dev/md* was 6945 * opened 6946 */ 6947 if (info->state & (1<<MD_SB_CLEAN)) 6948 mddev->recovery_cp = MaxSector; 6949 else 6950 mddev->recovery_cp = 0; 6951 mddev->persistent = ! info->not_persistent; 6952 mddev->external = 0; 6953 6954 mddev->layout = info->layout; 6955 if (mddev->level == 0) 6956 /* Cannot trust RAID0 layout info here */ 6957 mddev->layout = -1; 6958 mddev->chunk_sectors = info->chunk_size >> 9; 6959 6960 if (mddev->persistent) { 6961 mddev->max_disks = MD_SB_DISKS; 6962 mddev->flags = 0; 6963 mddev->sb_flags = 0; 6964 } 6965 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6966 6967 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 6968 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 6969 mddev->bitmap_info.offset = 0; 6970 6971 mddev->reshape_position = MaxSector; 6972 6973 /* 6974 * Generate a 128 bit UUID 6975 */ 6976 get_random_bytes(mddev->uuid, 16); 6977 6978 mddev->new_level = mddev->level; 6979 mddev->new_chunk_sectors = mddev->chunk_sectors; 6980 mddev->new_layout = mddev->layout; 6981 mddev->delta_disks = 0; 6982 mddev->reshape_backwards = 0; 6983 6984 return 0; 6985 } 6986 6987 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 6988 { 6989 lockdep_assert_held(&mddev->reconfig_mutex); 6990 6991 if (mddev->external_size) 6992 return; 6993 6994 mddev->array_sectors = array_sectors; 6995 } 6996 EXPORT_SYMBOL(md_set_array_sectors); 6997 6998 static int update_size(struct mddev *mddev, sector_t num_sectors) 6999 { 7000 struct md_rdev *rdev; 7001 int rv; 7002 int fit = (num_sectors == 0); 7003 sector_t old_dev_sectors = mddev->dev_sectors; 7004 7005 if (mddev->pers->resize == NULL) 7006 return -EINVAL; 7007 /* The "num_sectors" is the number of sectors of each device that 7008 * is used. This can only make sense for arrays with redundancy. 7009 * linear and raid0 always use whatever space is available. We can only 7010 * consider changing this number if no resync or reconstruction is 7011 * happening, and if the new size is acceptable. It must fit before the 7012 * sb_start or, if that is <data_offset, it must fit before the size 7013 * of each device. If num_sectors is zero, we find the largest size 7014 * that fits.
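 * (Worked example: members with 1000000, 1200000 and 1500000 usable
 * sectors and num_sectors == 0 end up with num_sectors = 1000000, the
 * smallest 'avail' seen in the loop below, while an explicit request
 * larger than 1000000 fails with -ENOSPC.)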
7015 */ 7016 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7017 mddev->sync_thread) 7018 return -EBUSY; 7019 if (mddev->ro) 7020 return -EROFS; 7021 7022 rdev_for_each(rdev, mddev) { 7023 sector_t avail = rdev->sectors; 7024 7025 if (fit && (num_sectors == 0 || num_sectors > avail)) 7026 num_sectors = avail; 7027 if (avail < num_sectors) 7028 return -ENOSPC; 7029 } 7030 rv = mddev->pers->resize(mddev, num_sectors); 7031 if (!rv) { 7032 if (mddev_is_clustered(mddev)) 7033 md_cluster_ops->update_size(mddev, old_dev_sectors); 7034 else if (mddev->queue) { 7035 set_capacity(mddev->gendisk, mddev->array_sectors); 7036 revalidate_disk(mddev->gendisk); 7037 } 7038 } 7039 return rv; 7040 } 7041 7042 static int update_raid_disks(struct mddev *mddev, int raid_disks) 7043 { 7044 int rv; 7045 struct md_rdev *rdev; 7046 /* change the number of raid disks */ 7047 if (mddev->pers->check_reshape == NULL) 7048 return -EINVAL; 7049 if (mddev->ro) 7050 return -EROFS; 7051 if (raid_disks <= 0 || 7052 (mddev->max_disks && raid_disks >= mddev->max_disks)) 7053 return -EINVAL; 7054 if (mddev->sync_thread || 7055 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7056 mddev->reshape_position != MaxSector) 7057 return -EBUSY; 7058 7059 rdev_for_each(rdev, mddev) { 7060 if (mddev->raid_disks < raid_disks && 7061 rdev->data_offset < rdev->new_data_offset) 7062 return -EINVAL; 7063 if (mddev->raid_disks > raid_disks && 7064 rdev->data_offset > rdev->new_data_offset) 7065 return -EINVAL; 7066 } 7067 7068 mddev->delta_disks = raid_disks - mddev->raid_disks; 7069 if (mddev->delta_disks < 0) 7070 mddev->reshape_backwards = 1; 7071 else if (mddev->delta_disks > 0) 7072 mddev->reshape_backwards = 0; 7073 7074 rv = mddev->pers->check_reshape(mddev); 7075 if (rv < 0) { 7076 mddev->delta_disks = 0; 7077 mddev->reshape_backwards = 0; 7078 } 7079 return rv; 7080 } 7081 7082 /* 7083 * update_array_info is used to change the configuration of an 7084 * on-line array. 7085 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 7086 * fields in the info are checked against the array. 7087 * Any differences that cannot be handled will cause an error. 7088 * Normally, only one change can be managed at a time. 
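 * (For example, a single call that changes both 'layout' and
 * 'raid_disks' is rejected with -EINVAL by the cnt check below.)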
7089 */ 7090 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 7091 { 7092 int rv = 0; 7093 int cnt = 0; 7094 int state = 0; 7095 7096 /* calculate expected state, ignoring low bits */ 7097 if (mddev->bitmap && mddev->bitmap_info.offset) 7098 state |= (1 << MD_SB_BITMAP_PRESENT); 7099 7100 if (mddev->major_version != info->major_version || 7101 mddev->minor_version != info->minor_version || 7102 /* mddev->patch_version != info->patch_version || */ 7103 mddev->ctime != info->ctime || 7104 mddev->level != info->level || 7105 /* mddev->layout != info->layout || */ 7106 mddev->persistent != !info->not_persistent || 7107 mddev->chunk_sectors != info->chunk_size >> 9 || 7108 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 7109 ((state^info->state) & 0xfffffe00) 7110 ) 7111 return -EINVAL; 7112 /* Check there is only one change */ 7113 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7114 cnt++; 7115 if (mddev->raid_disks != info->raid_disks) 7116 cnt++; 7117 if (mddev->layout != info->layout) 7118 cnt++; 7119 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 7120 cnt++; 7121 if (cnt == 0) 7122 return 0; 7123 if (cnt > 1) 7124 return -EINVAL; 7125 7126 if (mddev->layout != info->layout) { 7127 /* Change layout 7128 * we don't need to do anything at the md level, the 7129 * personality will take care of it all. 7130 */ 7131 if (mddev->pers->check_reshape == NULL) 7132 return -EINVAL; 7133 else { 7134 mddev->new_layout = info->layout; 7135 rv = mddev->pers->check_reshape(mddev); 7136 if (rv) 7137 mddev->new_layout = mddev->layout; 7138 return rv; 7139 } 7140 } 7141 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7142 rv = update_size(mddev, (sector_t)info->size * 2); 7143 7144 if (mddev->raid_disks != info->raid_disks) 7145 rv = update_raid_disks(mddev, info->raid_disks); 7146 7147 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 7148 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 7149 rv = -EINVAL; 7150 goto err; 7151 } 7152 if (mddev->recovery || mddev->sync_thread) { 7153 rv = -EBUSY; 7154 goto err; 7155 } 7156 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 7157 struct bitmap *bitmap; 7158 /* add the bitmap */ 7159 if (mddev->bitmap) { 7160 rv = -EEXIST; 7161 goto err; 7162 } 7163 if (mddev->bitmap_info.default_offset == 0) { 7164 rv = -EINVAL; 7165 goto err; 7166 } 7167 mddev->bitmap_info.offset = 7168 mddev->bitmap_info.default_offset; 7169 mddev->bitmap_info.space = 7170 mddev->bitmap_info.default_space; 7171 bitmap = md_bitmap_create(mddev, -1); 7172 mddev_suspend(mddev); 7173 if (!IS_ERR(bitmap)) { 7174 mddev->bitmap = bitmap; 7175 rv = md_bitmap_load(mddev); 7176 } else 7177 rv = PTR_ERR(bitmap); 7178 if (rv) 7179 md_bitmap_destroy(mddev); 7180 mddev_resume(mddev); 7181 } else { 7182 /* remove the bitmap */ 7183 if (!mddev->bitmap) { 7184 rv = -ENOENT; 7185 goto err; 7186 } 7187 if (mddev->bitmap->storage.file) { 7188 rv = -EINVAL; 7189 goto err; 7190 } 7191 if (mddev->bitmap_info.nodes) { 7192 /* hold a PW lock on all the bitmaps */ 7193 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { 7194 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 7195 rv = -EPERM; 7196 md_cluster_ops->unlock_all_bitmaps(mddev); 7197 goto err; 7198 } 7199 7200 mddev->bitmap_info.nodes = 0; 7201 md_cluster_ops->leave(mddev); 7202 } 7203 mddev_suspend(mddev); 7204 md_bitmap_destroy(mddev); 7205 mddev_resume(mddev); 7206 mddev->bitmap_info.offset = 0; 7207 } 7208 } 7209
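	/* Persist whatever configuration change was made (if any) before returning */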
md_update_sb(mddev, 1); 7210 return rv; 7211 err: 7212 return rv; 7213 } 7214 7215 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 7216 { 7217 struct md_rdev *rdev; 7218 int err = 0; 7219 7220 if (mddev->pers == NULL) 7221 return -ENODEV; 7222 7223 rcu_read_lock(); 7224 rdev = md_find_rdev_rcu(mddev, dev); 7225 if (!rdev) 7226 err = -ENODEV; 7227 else { 7228 md_error(mddev, rdev); 7229 if (!test_bit(Faulty, &rdev->flags)) 7230 err = -EBUSY; 7231 } 7232 rcu_read_unlock(); 7233 return err; 7234 } 7235 7236 /* 7237 * We have a problem here: there is no easy way to give a CHS 7238 * virtual geometry. We currently pretend that we have a 2-head, 7239 * 4-sector geometry (with a BIG number of cylinders...). This drives 7240 * dosfs just mad... ;-) 7241 */ 7242 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 7243 { 7244 struct mddev *mddev = bdev->bd_disk->private_data; 7245 7246 geo->heads = 2; 7247 geo->sectors = 4; 7248 geo->cylinders = mddev->array_sectors / 8; 7249 return 0; 7250 } 7251 7252 static inline bool md_ioctl_valid(unsigned int cmd) 7253 { 7254 switch (cmd) { 7255 case ADD_NEW_DISK: 7256 case BLKROSET: 7257 case GET_ARRAY_INFO: 7258 case GET_BITMAP_FILE: 7259 case GET_DISK_INFO: 7260 case HOT_ADD_DISK: 7261 case HOT_REMOVE_DISK: 7262 case RAID_AUTORUN: 7263 case RAID_VERSION: 7264 case RESTART_ARRAY_RW: 7265 case RUN_ARRAY: 7266 case SET_ARRAY_INFO: 7267 case SET_BITMAP_FILE: 7268 case SET_DISK_FAULTY: 7269 case STOP_ARRAY: 7270 case STOP_ARRAY_RO: 7271 case CLUSTERED_DISK_NACK: 7272 return true; 7273 default: 7274 return false; 7275 } 7276 } 7277 7278 static int md_ioctl(struct block_device *bdev, fmode_t mode, 7279 unsigned int cmd, unsigned long arg) 7280 { 7281 int err = 0; 7282 void __user *argp = (void __user *)arg; 7283 struct mddev *mddev = NULL; 7284 int ro; 7285 bool did_set_md_closing = false; 7286 7287 if (!md_ioctl_valid(cmd)) 7288 return -ENOTTY; 7289 7290 switch (cmd) { 7291 case RAID_VERSION: 7292 case GET_ARRAY_INFO: 7293 case GET_DISK_INFO: 7294 break; 7295 default: 7296 if (!capable(CAP_SYS_ADMIN)) 7297 return -EACCES; 7298 } 7299 7300 /* 7301 * Commands dealing with the RAID driver but not any 7302 * particular array: 7303 */ 7304 switch (cmd) { 7305 case RAID_VERSION: 7306 err = get_version(argp); 7307 goto out; 7308 7309 #ifndef MODULE 7310 case RAID_AUTORUN: 7311 err = 0; 7312 autostart_arrays(arg); 7313 goto out; 7314 #endif 7315 default:; 7316 } 7317 7318 /* 7319 * Commands creating/starting a new array: 7320 */ 7321 7322 mddev = bdev->bd_disk->private_data; 7323 7324 if (!mddev) { 7325 BUG(); 7326 goto out; 7327 } 7328 7329 /* Some actions do not require the mutex */ 7330 switch (cmd) { 7331 case GET_ARRAY_INFO: 7332 if (!mddev->raid_disks && !mddev->external) 7333 err = -ENODEV; 7334 else 7335 err = get_array_info(mddev, argp); 7336 goto out; 7337 7338 case GET_DISK_INFO: 7339 if (!mddev->raid_disks && !mddev->external) 7340 err = -ENODEV; 7341 else 7342 err = get_disk_info(mddev, argp); 7343 goto out; 7344 7345 case SET_DISK_FAULTY: 7346 err = set_disk_faulty(mddev, new_decode_dev(arg)); 7347 goto out; 7348 7349 case GET_BITMAP_FILE: 7350 err = get_bitmap_file(mddev, argp); 7351 goto out; 7352 7353 } 7354 7355 if (cmd == ADD_NEW_DISK) 7356 /* need to ensure md_delayed_delete() has completed */ 7357 flush_workqueue(md_misc_wq); 7358 7359 if (cmd == HOT_REMOVE_DISK) 7360 /* need to ensure recovery thread has run */ 7361 wait_event_interruptible_timeout(mddev->sb_wait, 7362 !test_bit(MD_RECOVERY_NEEDED, 7363 &mddev->recovery),
7364 msecs_to_jiffies(5000)); 7365 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 7366 /* Need to flush page cache, and ensure no-one else opens 7367 * and writes 7368 */ 7369 mutex_lock(&mddev->open_mutex); 7370 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 7371 mutex_unlock(&mddev->open_mutex); 7372 err = -EBUSY; 7373 goto out; 7374 } 7375 WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); 7376 set_bit(MD_CLOSING, &mddev->flags); 7377 did_set_md_closing = true; 7378 mutex_unlock(&mddev->open_mutex); 7379 sync_blockdev(bdev); 7380 } 7381 err = mddev_lock(mddev); 7382 if (err) { 7383 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 7384 err, cmd); 7385 goto out; 7386 } 7387 7388 if (cmd == SET_ARRAY_INFO) { 7389 mdu_array_info_t info; 7390 if (!arg) 7391 memset(&info, 0, sizeof(info)); 7392 else if (copy_from_user(&info, argp, sizeof(info))) { 7393 err = -EFAULT; 7394 goto unlock; 7395 } 7396 if (mddev->pers) { 7397 err = update_array_info(mddev, &info); 7398 if (err) { 7399 pr_warn("md: couldn't update array info. %d\n", err); 7400 goto unlock; 7401 } 7402 goto unlock; 7403 } 7404 if (!list_empty(&mddev->disks)) { 7405 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 7406 err = -EBUSY; 7407 goto unlock; 7408 } 7409 if (mddev->raid_disks) { 7410 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 7411 err = -EBUSY; 7412 goto unlock; 7413 } 7414 err = set_array_info(mddev, &info); 7415 if (err) { 7416 pr_warn("md: couldn't set array info. %d\n", err); 7417 goto unlock; 7418 } 7419 goto unlock; 7420 } 7421 7422 /* 7423 * Commands querying/configuring an existing array: 7424 */ 7425 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 7426 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 7427 if ((!mddev->raid_disks && !mddev->external) 7428 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 7429 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 7430 && cmd != GET_BITMAP_FILE) { 7431 err = -ENODEV; 7432 goto unlock; 7433 } 7434 7435 /* 7436 * Commands even a read-only array can execute: 7437 */ 7438 switch (cmd) { 7439 case RESTART_ARRAY_RW: 7440 err = restart_array(mddev); 7441 goto unlock; 7442 7443 case STOP_ARRAY: 7444 err = do_md_stop(mddev, 0, bdev); 7445 goto unlock; 7446 7447 case STOP_ARRAY_RO: 7448 err = md_set_readonly(mddev, bdev); 7449 goto unlock; 7450 7451 case HOT_REMOVE_DISK: 7452 err = hot_remove_disk(mddev, new_decode_dev(arg)); 7453 goto unlock; 7454 7455 case ADD_NEW_DISK: 7456 /* We can support ADD_NEW_DISK on read-only arrays 7457 * only if we are re-adding a preexisting device. 7458 * So require mddev->pers and MD_DISK_SYNC. 7459 */ 7460 if (mddev->pers) { 7461 mdu_disk_info_t info; 7462 if (copy_from_user(&info, argp, sizeof(info))) 7463 err = -EFAULT; 7464 else if (!(info.state & (1<<MD_DISK_SYNC))) 7465 /* Need to clear read-only for this */ 7466 break; 7467 else 7468 err = add_new_disk(mddev, &info); 7469 goto unlock; 7470 } 7471 break; 7472 7473 case BLKROSET: 7474 if (get_user(ro, (int __user *)(arg))) { 7475 err = -EFAULT; 7476 goto unlock; 7477 } 7478 err = -EINVAL; 7479 7480 /* if the bdev is going readonly the value of mddev->ro 7481 * does not matter, no writes are coming 7482 */ 7483 if (ro) 7484 goto unlock; 7485 7486 /* are we already prepared for writes?
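 * (mddev->ro == 2 means auto-read-only, which already switches to
 * read-write on the first write, so only a fully read-only array,
 * mddev->ro == 1, needs the restart below.)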
*/ 7487 if (mddev->ro != 1) 7488 goto unlock; 7489 7490 /* transitioning to readauto need only happen for 7491 * arrays that call md_write_start 7492 */ 7493 if (mddev->pers) { 7494 err = restart_array(mddev); 7495 if (err == 0) { 7496 mddev->ro = 2; 7497 set_disk_ro(mddev->gendisk, 0); 7498 } 7499 } 7500 goto unlock; 7501 } 7502 7503 /* 7504 * The remaining ioctls are changing the state of the 7505 * superblock, so we do not allow them on read-only arrays. 7506 */ 7507 if (mddev->ro && mddev->pers) { 7508 if (mddev->ro == 2) { 7509 mddev->ro = 0; 7510 sysfs_notify_dirent_safe(mddev->sysfs_state); 7511 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7512 /* mddev_unlock will wake thread */ 7513 /* If a device failed while we were read-only, we 7514 * need to make sure the metadata is updated now. 7515 */ 7516 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 7517 mddev_unlock(mddev); 7518 wait_event(mddev->sb_wait, 7519 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 7520 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 7521 mddev_lock_nointr(mddev); 7522 } 7523 } else { 7524 err = -EROFS; 7525 goto unlock; 7526 } 7527 } 7528 7529 switch (cmd) { 7530 case ADD_NEW_DISK: 7531 { 7532 mdu_disk_info_t info; 7533 if (copy_from_user(&info, argp, sizeof(info))) 7534 err = -EFAULT; 7535 else 7536 err = add_new_disk(mddev, &info); 7537 goto unlock; 7538 } 7539 7540 case CLUSTERED_DISK_NACK: 7541 if (mddev_is_clustered(mddev)) 7542 md_cluster_ops->new_disk_ack(mddev, false); 7543 else 7544 err = -EINVAL; 7545 goto unlock; 7546 7547 case HOT_ADD_DISK: 7548 err = hot_add_disk(mddev, new_decode_dev(arg)); 7549 goto unlock; 7550 7551 case RUN_ARRAY: 7552 err = do_md_run(mddev); 7553 goto unlock; 7554 7555 case SET_BITMAP_FILE: 7556 err = set_bitmap_file(mddev, (int)arg); 7557 goto unlock; 7558 7559 default: 7560 err = -EINVAL; 7561 goto unlock; 7562 } 7563 7564 unlock: 7565 if (mddev->hold_active == UNTIL_IOCTL && 7566 err != -EINVAL) 7567 mddev->hold_active = 0; 7568 mddev_unlock(mddev); 7569 out: 7570 if(did_set_md_closing) 7571 clear_bit(MD_CLOSING, &mddev->flags); 7572 return err; 7573 } 7574 #ifdef CONFIG_COMPAT 7575 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 7576 unsigned int cmd, unsigned long arg) 7577 { 7578 switch (cmd) { 7579 case HOT_REMOVE_DISK: 7580 case HOT_ADD_DISK: 7581 case SET_DISK_FAULTY: 7582 case SET_BITMAP_FILE: 7583 /* These take in integer arg, do not convert */ 7584 break; 7585 default: 7586 arg = (unsigned long)compat_ptr(arg); 7587 break; 7588 } 7589 7590 return md_ioctl(bdev, mode, cmd, arg); 7591 } 7592 #endif /* CONFIG_COMPAT */ 7593 7594 static int md_open(struct block_device *bdev, fmode_t mode) 7595 { 7596 /* 7597 * Succeed if we can lock the mddev, which confirms that 7598 * it isn't being stopped right now. 7599 */ 7600 struct mddev *mddev = mddev_find(bdev->bd_dev); 7601 int err; 7602 7603 if (!mddev) 7604 return -ENODEV; 7605 7606 if (mddev->gendisk != bdev->bd_disk) { 7607 /* we are racing with mddev_put which is discarding this 7608 * bd_disk. 
7609 */ 7610 mddev_put(mddev); 7611 /* Wait until bdev->bd_disk is definitely gone */ 7612 flush_workqueue(md_misc_wq); 7613 /* Then retry the open from the top */ 7614 return -ERESTARTSYS; 7615 } 7616 BUG_ON(mddev != bdev->bd_disk->private_data); 7617 7618 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 7619 goto out; 7620 7621 if (test_bit(MD_CLOSING, &mddev->flags)) { 7622 mutex_unlock(&mddev->open_mutex); 7623 err = -ENODEV; 7624 goto out; 7625 } 7626 7627 err = 0; 7628 atomic_inc(&mddev->openers); 7629 mutex_unlock(&mddev->open_mutex); 7630 7631 check_disk_change(bdev); 7632 out: 7633 if (err) 7634 mddev_put(mddev); 7635 return err; 7636 } 7637 7638 static void md_release(struct gendisk *disk, fmode_t mode) 7639 { 7640 struct mddev *mddev = disk->private_data; 7641 7642 BUG_ON(!mddev); 7643 atomic_dec(&mddev->openers); 7644 mddev_put(mddev); 7645 } 7646 7647 static int md_media_changed(struct gendisk *disk) 7648 { 7649 struct mddev *mddev = disk->private_data; 7650 7651 return mddev->changed; 7652 } 7653 7654 static int md_revalidate(struct gendisk *disk) 7655 { 7656 struct mddev *mddev = disk->private_data; 7657 7658 mddev->changed = 0; 7659 return 0; 7660 } 7661 static const struct block_device_operations md_fops = 7662 { 7663 .owner = THIS_MODULE, 7664 .open = md_open, 7665 .release = md_release, 7666 .ioctl = md_ioctl, 7667 #ifdef CONFIG_COMPAT 7668 .compat_ioctl = md_compat_ioctl, 7669 #endif 7670 .getgeo = md_getgeo, 7671 .media_changed = md_media_changed, 7672 .revalidate_disk= md_revalidate, 7673 }; 7674 7675 static int md_thread(void *arg) 7676 { 7677 struct md_thread *thread = arg; 7678 7679 /* 7680 * md_thread is a 'system-thread', its priority should be very 7681 * high. We avoid resource deadlocks individually in each 7682 * raid personality. (RAID5 does preallocation) We also use RR and 7683 * the very same RT priority as kswapd, thus we will never get 7684 * into a priority inversion deadlock. 7685 * 7686 * we definitely have to have equal or higher priority than 7687 * bdflush, otherwise bdflush will deadlock if there are too 7688 * many dirty RAID5 blocks. 7689 */ 7690 7691 allow_signal(SIGKILL); 7692 while (!kthread_should_stop()) { 7693 7694 /* We need to wait INTERRUPTIBLE so that 7695 * we don't add to the load-average.
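 * (an uninterruptible sleep is counted as runnable by the loadavg
 * calculation, even though the thread is idle).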
7696 * That means we need to be sure no signals are 7697 * pending 7698 */ 7699 if (signal_pending(current)) 7700 flush_signals(current); 7701 7702 wait_event_interruptible_timeout 7703 (thread->wqueue, 7704 test_bit(THREAD_WAKEUP, &thread->flags) 7705 || kthread_should_stop() || kthread_should_park(), 7706 thread->timeout); 7707 7708 clear_bit(THREAD_WAKEUP, &thread->flags); 7709 if (kthread_should_park()) 7710 kthread_parkme(); 7711 if (!kthread_should_stop()) 7712 thread->run(thread); 7713 } 7714 7715 return 0; 7716 } 7717 7718 void md_wakeup_thread(struct md_thread *thread) 7719 { 7720 if (thread) { 7721 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7722 set_bit(THREAD_WAKEUP, &thread->flags); 7723 wake_up(&thread->wqueue); 7724 } 7725 } 7726 EXPORT_SYMBOL(md_wakeup_thread); 7727 7728 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7729 struct mddev *mddev, const char *name) 7730 { 7731 struct md_thread *thread; 7732 7733 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7734 if (!thread) 7735 return NULL; 7736 7737 init_waitqueue_head(&thread->wqueue); 7738 7739 thread->run = run; 7740 thread->mddev = mddev; 7741 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7742 thread->tsk = kthread_run(md_thread, thread, 7743 "%s_%s", 7744 mdname(thread->mddev), 7745 name); 7746 if (IS_ERR(thread->tsk)) { 7747 kfree(thread); 7748 return NULL; 7749 } 7750 return thread; 7751 } 7752 EXPORT_SYMBOL(md_register_thread); 7753 7754 void md_unregister_thread(struct md_thread **threadp) 7755 { 7756 struct md_thread *thread = *threadp; 7757 if (!thread) 7758 return; 7759 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7760 /* Locking ensures that mddev_unlock does not wake_up a 7761 * non-existent thread 7762 */ 7763 spin_lock(&pers_lock); 7764 *threadp = NULL; 7765 spin_unlock(&pers_lock); 7766 7767 kthread_stop(thread->tsk); 7768 kfree(thread); 7769 } 7770 EXPORT_SYMBOL(md_unregister_thread); 7771 7772 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7773 { 7774 if (!rdev || test_bit(Faulty, &rdev->flags)) 7775 return; 7776 7777 if (!mddev->pers || !mddev->pers->error_handler) 7778 return; 7779 mddev->pers->error_handler(mddev,rdev); 7780 if (mddev->degraded) 7781 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7782 sysfs_notify_dirent_safe(rdev->sysfs_state); 7783 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7784 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7785 md_wakeup_thread(mddev->thread); 7786 if (mddev->event_work.func) 7787 queue_work(md_misc_wq, &mddev->event_work); 7788 md_new_event(mddev); 7789 } 7790 EXPORT_SYMBOL(md_error); 7791 7792 /* seq_file implementation /proc/mdstat */ 7793 7794 static void status_unused(struct seq_file *seq) 7795 { 7796 int i = 0; 7797 struct md_rdev *rdev; 7798 7799 seq_printf(seq, "unused devices: "); 7800 7801 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7802 char b[BDEVNAME_SIZE]; 7803 i++; 7804 seq_printf(seq, "%s ", 7805 bdevname(rdev->bdev,b)); 7806 } 7807 if (!i) 7808 seq_printf(seq, "<none>"); 7809 7810 seq_printf(seq, "\n"); 7811 } 7812 7813 static int status_resync(struct seq_file *seq, struct mddev *mddev) 7814 { 7815 sector_t max_sectors, resync, res; 7816 unsigned long dt, db = 0; 7817 sector_t rt, curr_mark_cnt, resync_mark_cnt; 7818 int scale, recovery_active; 7819 unsigned int per_milli; 7820 7821 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 7822 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7823 max_sectors = mddev->resync_max_sectors; 7824 else 7825 
max_sectors = mddev->dev_sectors; 7826 7827 resync = mddev->curr_resync; 7828 if (resync <= 3) { 7829 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7830 /* Still cleaning up */ 7831 resync = max_sectors; 7832 } else if (resync > max_sectors) 7833 resync = max_sectors; 7834 else 7835 resync -= atomic_read(&mddev->recovery_active); 7836 7837 if (resync == 0) { 7838 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 7839 struct md_rdev *rdev; 7840 7841 rdev_for_each(rdev, mddev) 7842 if (rdev->raid_disk >= 0 && 7843 !test_bit(Faulty, &rdev->flags) && 7844 rdev->recovery_offset != MaxSector && 7845 rdev->recovery_offset) { 7846 seq_printf(seq, "\trecover=REMOTE"); 7847 return 1; 7848 } 7849 if (mddev->reshape_position != MaxSector) 7850 seq_printf(seq, "\treshape=REMOTE"); 7851 else 7852 seq_printf(seq, "\tresync=REMOTE"); 7853 return 1; 7854 } 7855 if (mddev->recovery_cp < MaxSector) { 7856 seq_printf(seq, "\tresync=PENDING"); 7857 return 1; 7858 } 7859 return 0; 7860 } 7861 if (resync < 3) { 7862 seq_printf(seq, "\tresync=DELAYED"); 7863 return 1; 7864 } 7865 7866 WARN_ON(max_sectors == 0); 7867 /* Pick 'scale' such that (resync>>scale)*1000 will fit 7868 * in a sector_t, and (max_sectors>>scale) will fit in a 7869 * u32, as those are the requirements for sector_div. 7870 * Thus 'scale' must be at least 10 7871 */ 7872 scale = 10; 7873 if (sizeof(sector_t) > sizeof(unsigned long)) { 7874 while ( max_sectors/2 > (1ULL<<(scale+32))) 7875 scale++; 7876 } 7877 res = (resync>>scale)*1000; 7878 sector_div(res, (u32)((max_sectors>>scale)+1)); 7879 7880 per_milli = res; 7881 { 7882 int i, x = per_milli/50, y = 20-x; 7883 seq_printf(seq, "["); 7884 for (i = 0; i < x; i++) 7885 seq_printf(seq, "="); 7886 seq_printf(seq, ">"); 7887 for (i = 0; i < y; i++) 7888 seq_printf(seq, "."); 7889 seq_printf(seq, "] "); 7890 } 7891 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 7892 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 7893 "reshape" : 7894 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 7895 "check" : 7896 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 7897 "resync" : "recovery"))), 7898 per_milli/10, per_milli % 10, 7899 (unsigned long long) resync/2, 7900 (unsigned long long) max_sectors/2); 7901 7902 /* 7903 * dt: time from mark until now 7904 * db: blocks written from mark until now 7905 * rt: remaining time 7906 * 7907 * rt is a sector_t, which is always 64bit now. We are keeping 7908 * the original algorithm, but it is not really necessary. 7909 * 7910 * Original algorithm: 7911 * So we divide before multiply in case it is 32bit and close 7912 * to the limit. 7913 * We scale the divisor (db) by 32 to avoid losing precision 7914 * near the end of resync when the number of remaining sectors 7915 * is close to 'db'. 7916 * We then divide rt by 32 after multiplying by db to compensate. 7917 * The '+1' avoids division by zero if db is very small. 
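 * Worked example (editorial): dt = 10s and db = 20480 sectors (10 MiB)
 * written since the mark give db/32+1 = 641; with 2097152 sectors
 * (1 GiB) remaining, rt = (2097152 / 641) * 10 >> 5 is about 1022
 * seconds (~17 minutes), close to the exact remaining/rate figure of
 * 1024 seconds.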
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;

	curr_mark_cnt = mddev->curr_mark_cnt;
	recovery_active = atomic_read(&mddev->recovery_active);
	resync_mark_cnt = mddev->resync_mark_cnt;

	if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);

	rt = max_sectors - resync;    /* number of remaining sectors */
	rt = div64_u64(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
	return 1;
}

static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	struct mddev *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, struct mddev, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	struct mddev *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;

}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

static int md_seq_show(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;
	sector_t sectors;
	struct md_rdev *rdev;

	if (v == (void*)1) {
		struct md_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		seq->poll_event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	spin_lock(&mddev->lock);
	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ?
"" : "in"); 8030 if (mddev->pers) { 8031 if (mddev->ro==1) 8032 seq_printf(seq, " (read-only)"); 8033 if (mddev->ro==2) 8034 seq_printf(seq, " (auto-read-only)"); 8035 seq_printf(seq, " %s", mddev->pers->name); 8036 } 8037 8038 sectors = 0; 8039 rcu_read_lock(); 8040 rdev_for_each_rcu(rdev, mddev) { 8041 char b[BDEVNAME_SIZE]; 8042 seq_printf(seq, " %s[%d]", 8043 bdevname(rdev->bdev,b), rdev->desc_nr); 8044 if (test_bit(WriteMostly, &rdev->flags)) 8045 seq_printf(seq, "(W)"); 8046 if (test_bit(Journal, &rdev->flags)) 8047 seq_printf(seq, "(J)"); 8048 if (test_bit(Faulty, &rdev->flags)) { 8049 seq_printf(seq, "(F)"); 8050 continue; 8051 } 8052 if (rdev->raid_disk < 0) 8053 seq_printf(seq, "(S)"); /* spare */ 8054 if (test_bit(Replacement, &rdev->flags)) 8055 seq_printf(seq, "(R)"); 8056 sectors += rdev->sectors; 8057 } 8058 rcu_read_unlock(); 8059 8060 if (!list_empty(&mddev->disks)) { 8061 if (mddev->pers) 8062 seq_printf(seq, "\n %llu blocks", 8063 (unsigned long long) 8064 mddev->array_sectors / 2); 8065 else 8066 seq_printf(seq, "\n %llu blocks", 8067 (unsigned long long)sectors / 2); 8068 } 8069 if (mddev->persistent) { 8070 if (mddev->major_version != 0 || 8071 mddev->minor_version != 90) { 8072 seq_printf(seq," super %d.%d", 8073 mddev->major_version, 8074 mddev->minor_version); 8075 } 8076 } else if (mddev->external) 8077 seq_printf(seq, " super external:%s", 8078 mddev->metadata_type); 8079 else 8080 seq_printf(seq, " super non-persistent"); 8081 8082 if (mddev->pers) { 8083 mddev->pers->status(seq, mddev); 8084 seq_printf(seq, "\n "); 8085 if (mddev->pers->sync_request) { 8086 if (status_resync(seq, mddev)) 8087 seq_printf(seq, "\n "); 8088 } 8089 } else 8090 seq_printf(seq, "\n "); 8091 8092 md_bitmap_status(seq, mddev->bitmap); 8093 8094 seq_printf(seq, "\n"); 8095 } 8096 spin_unlock(&mddev->lock); 8097 8098 return 0; 8099 } 8100 8101 static const struct seq_operations md_seq_ops = { 8102 .start = md_seq_start, 8103 .next = md_seq_next, 8104 .stop = md_seq_stop, 8105 .show = md_seq_show, 8106 }; 8107 8108 static int md_seq_open(struct inode *inode, struct file *file) 8109 { 8110 struct seq_file *seq; 8111 int error; 8112 8113 error = seq_open(file, &md_seq_ops); 8114 if (error) 8115 return error; 8116 8117 seq = file->private_data; 8118 seq->poll_event = atomic_read(&md_event_count); 8119 return error; 8120 } 8121 8122 static int md_unloading; 8123 static __poll_t mdstat_poll(struct file *filp, poll_table *wait) 8124 { 8125 struct seq_file *seq = filp->private_data; 8126 __poll_t mask; 8127 8128 if (md_unloading) 8129 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 8130 poll_wait(filp, &md_event_waiters, wait); 8131 8132 /* always allow read */ 8133 mask = EPOLLIN | EPOLLRDNORM; 8134 8135 if (seq->poll_event != atomic_read(&md_event_count)) 8136 mask |= EPOLLERR | EPOLLPRI; 8137 return mask; 8138 } 8139 8140 static const struct file_operations md_seq_fops = { 8141 .owner = THIS_MODULE, 8142 .open = md_seq_open, 8143 .read = seq_read, 8144 .llseek = seq_lseek, 8145 .release = seq_release, 8146 .poll = mdstat_poll, 8147 }; 8148 8149 int register_md_personality(struct md_personality *p) 8150 { 8151 pr_debug("md: %s personality registered for level %d\n", 8152 p->name, p->level); 8153 spin_lock(&pers_lock); 8154 list_add_tail(&p->list, &pers_list); 8155 spin_unlock(&pers_lock); 8156 return 0; 8157 } 8158 EXPORT_SYMBOL(register_md_personality); 8159 8160 int unregister_md_personality(struct md_personality *p) 8161 { 8162 pr_debug("md: %s personality unregistered\n", p->name); 8163 
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_personality);

int register_md_cluster_operations(struct md_cluster_operations *ops,
				   struct module *module)
{
	int ret = 0;
	spin_lock(&pers_lock);
	if (md_cluster_ops != NULL)
		ret = -EALREADY;
	else {
		md_cluster_ops = ops;
		md_cluster_mod = module;
	}
	spin_unlock(&pers_lock);
	return ret;
}
EXPORT_SYMBOL(register_md_cluster_operations);

int unregister_md_cluster_operations(void)
{
	spin_lock(&pers_lock);
	md_cluster_ops = NULL;
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_cluster_operations);

int md_setup_cluster(struct mddev *mddev, int nodes)
{
	if (!md_cluster_ops)
		request_module("md-cluster");
	spin_lock(&pers_lock);
	/* ensure module won't be unloaded */
	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
		pr_warn("can't find md-cluster module or get its reference.\n");
		spin_unlock(&pers_lock);
		return -ENOENT;
	}
	spin_unlock(&pers_lock);

	return md_cluster_ops->join(mddev, nodes);
}

void md_cluster_stop(struct mddev *mddev)
{
	if (!md_cluster_ops)
		return;
	md_cluster_ops->leave(mddev);
	module_put(md_cluster_mod);
}

static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}

void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
EXPORT_SYMBOL(md_done_sync);

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 * A return value of 'false' means that the write wasn't recorded
 * and cannot proceed as the array is being suspended.
 */
bool md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;

	if (bio_data_dir(bi) != WRITE)
		return true;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	rcu_read_lock();
	percpu_ref_get(&mddev->writes_pending);
	smp_mb(); /* Match smp_mb in set_in_sync() */
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
	if (mddev->in_sync || mddev->sync_checkers) {
		spin_lock(&mddev->lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock(&mddev->lock);
	}
	rcu_read_unlock();
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	if (!mddev->has_superblocks)
		return true;
	wait_event(mddev->sb_wait,
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
		   mddev->suspended);
	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		percpu_ref_put(&mddev->writes_pending);
		return false;
	}
	return true;
}
EXPORT_SYMBOL(md_write_start);

/* md_write_inc can only be called when md_write_start() has
 * already been called at least once for the current request.
 * It increments the counter and is useful when a single request
 * is split into several parts.  Each part causes an increment and
 * so needs a matching md_write_end().
 * Unlike md_write_start(), it is safe to call md_write_inc() inside
 * a spinlocked region.
 */
void md_write_inc(struct mddev *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;
	WARN_ON_ONCE(mddev->in_sync || mddev->ro);
	percpu_ref_get(&mddev->writes_pending);
}
EXPORT_SYMBOL(md_write_inc);

void md_write_end(struct mddev *mddev)
{
	percpu_ref_put(&mddev->writes_pending);

	if (mddev->safemode == 2)
		md_wakeup_thread(mddev->thread);
	else if (mddev->safemode_delay)
		/* The roundup() ensures this only performs locking once
		 * every ->safemode_delay jiffies
		 */
		mod_timer(&mddev->safemode_timer,
			  roundup(jiffies, mddev->safemode_delay) +
			  mddev->safemode_delay);
}

EXPORT_SYMBOL(md_write_end);

/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
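 *
 * Typical call pattern (an illustrative sketch only, not a real call
 * site; locking return value and error handling omitted):
 *
 *	mddev_lock(mddev);
 *	md_allow_write(mddev);
 *	ptr = kmalloc(size, GFP_KERNEL);   <- cannot now block on a
 *	...                                   pending superblock write
 *	mddev_unlock(mddev);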
 */
void md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return;
	if (mddev->ro)
		return;
	if (!mddev->pers->sync_request)
		return;

	spin_lock(&mddev->lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock(&mddev->lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		/* wait for the dirty state to be recorded in the metadata */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	} else
		spin_unlock(&mddev->lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct mddev *mddev2;
	unsigned int currspeed = 0, window;
	sector_t max_sectors,j, io_sectors, recovery_done;
	unsigned long mark[SYNC_MARKS];
	unsigned long update_time;
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct md_rdev *rdev;
	char *desc, *action = NULL;
	struct blk_plug plug;
	int ret;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
		return;
	if (mddev->ro) {/* never try to sync a read-only array */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
	}

	if (mddev_is_clustered(mddev)) {
		ret = md_cluster_ops->resync_start(mddev);
		if (ret)
			goto skip;

		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
		     && ((unsigned long long)mddev->curr_resync_completed
			 < (unsigned long long)mddev->resync_max_sectors))
			goto skip;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
			desc = "data-check";
			action = "check";
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			desc = "requested-resync";
			action = "repair";
		} else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	mddev->last_sync_action = action ?: desc;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
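	 *
	 * Example of the yield rule with two conflicting arrays A and B,
	 * where A has the lower mddev address: both start with
	 * curr_resync == 2; A sees the conflict and, being the lower
	 * address, drops to 1 and waits on resync_wait; B sees that A's
	 * curr_resync (1) is below its own (2) and proceeds.  When B
	 * finishes it wakes resync_wait and A restarts its check.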
	 *
	 */

	do {
		int mddev2_minor = -1;
		mddev->curr_resync = 2;

	try_again:
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					if (mddev2_minor != mddev2->md_minor) {
						mddev2_minor = mddev2->md_minor;
						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
							desc, mdname(mddev),
							mdname(mddev2));
					}
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
		max_sectors = mddev->resync_max_sectors;
		/*
		 * If the original node aborts reshaping then we continue the
		 * reshaping, so set j again to avoid restarting the reshape
		 * from the very beginning
		 */
		if (mddev_is_clustered(mddev) &&
		    mddev->reshape_position != MaxSector)
			j = mddev->reshape_position;
	} else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Journal, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();

		/* If there is a bitmap, we need to make sure all
		 * writes that started before we added a spare
		 * complete before we start doing a recovery.
		 * Otherwise the write might complete and (via
		 * bitmap_endwrite) set a bit in the bitmap after the
		 * recovery has checked that bit and skipped that
		 * region.
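		 * The quiesce(1)/quiesce(0) pair below acts as such a
		 * barrier: it drains every write that was issued before
		 * the spare was added, so no bitmap bit can be set behind
		 * the recovery's back.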
		 */
		if (mddev->bitmap) {
			mddev->pers->quiesce(mddev, 1);
			mddev->pers->quiesce(mddev, 0);
		}
	}

	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
	pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
		 speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32 * (PAGE_SIZE / 512);
	pr_debug("md: using %dk window, over a total of %lluk.\n",
		 window/2, (unsigned long long)max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		pr_debug("md: resuming %s of %s from checkpoint.\n",
			 desc, mdname(mddev));
		mddev->curr_resync = j;
	} else
		mddev->curr_resync = 3; /* no longer delayed */
	mddev->curr_resync_completed = j;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	md_new_event(mddev);
	update_time = jiffies;

	blk_start_plug(&plug);
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed ||
		     mddev->curr_resync_completed > mddev->resync_max
			    )) {
			/* time to update curr_resync_completed */
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed = j;
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
			    j > mddev->recovery_cp)
				mddev->recovery_cp = j;
			update_time = jiffies;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max &&
		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || test_bit(MD_RECOVERY_INTR,
							     &mddev->recovery));
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		sectors = mddev->pers->sync_request(mddev, j, &skipped);
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			break;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		j += sectors;
		if (j > max_sectors)
			/* when skipping, extra large numbers can be returned.
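			 * (e.g. a bitmap may let sync_request() skip a whole
			 * in-sync region at once, so clamp j before using it
			 * for progress reporting.)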
			 */
			j = max_sectors;
		if (j > 2)
			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		/*
		 * this loop exits only when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if (currspeed > speed_max(mddev)) {
				msleep(500);
				goto repeat;
			}
			if (!is_mddev_idle(mddev, 0)) {
				/*
				 * Give other IO more of a chance.
				 * The faster the devices, the less we wait.
				 */
				wait_event(mddev->recovery_wait,
					   !atomic_read(&mddev->recovery_active));
			}
		}
	}
	pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
"interrupted" : "done"); 8734 /* 8735 * this also signals 'finished resyncing' to md_stop 8736 */ 8737 blk_finish_plug(&plug); 8738 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 8739 8740 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8741 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8742 mddev->curr_resync > 3) { 8743 mddev->curr_resync_completed = mddev->curr_resync; 8744 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8745 } 8746 mddev->pers->sync_request(mddev, max_sectors, &skipped); 8747 8748 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 8749 mddev->curr_resync > 3) { 8750 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8751 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8752 if (mddev->curr_resync >= mddev->recovery_cp) { 8753 pr_debug("md: checkpointing %s of %s.\n", 8754 desc, mdname(mddev)); 8755 if (test_bit(MD_RECOVERY_ERROR, 8756 &mddev->recovery)) 8757 mddev->recovery_cp = 8758 mddev->curr_resync_completed; 8759 else 8760 mddev->recovery_cp = 8761 mddev->curr_resync; 8762 } 8763 } else 8764 mddev->recovery_cp = MaxSector; 8765 } else { 8766 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8767 mddev->curr_resync = MaxSector; 8768 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8769 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 8770 rcu_read_lock(); 8771 rdev_for_each_rcu(rdev, mddev) 8772 if (rdev->raid_disk >= 0 && 8773 mddev->delta_disks >= 0 && 8774 !test_bit(Journal, &rdev->flags) && 8775 !test_bit(Faulty, &rdev->flags) && 8776 !test_bit(In_sync, &rdev->flags) && 8777 rdev->recovery_offset < mddev->curr_resync) 8778 rdev->recovery_offset = mddev->curr_resync; 8779 rcu_read_unlock(); 8780 } 8781 } 8782 } 8783 skip: 8784 /* set CHANGE_PENDING here since maybe another update is needed, 8785 * so other nodes are informed. It should be harmless for normal 8786 * raid */ 8787 set_mask_bits(&mddev->sb_flags, 0, 8788 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 8789 8790 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8791 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8792 mddev->delta_disks > 0 && 8793 mddev->pers->finish_reshape && 8794 mddev->pers->size && 8795 mddev->queue) { 8796 mddev_lock_nointr(mddev); 8797 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 8798 mddev_unlock(mddev); 8799 if (!mddev_is_clustered(mddev)) { 8800 set_capacity(mddev->gendisk, mddev->array_sectors); 8801 revalidate_disk(mddev->gendisk); 8802 } 8803 } 8804 8805 spin_lock(&mddev->lock); 8806 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8807 /* We completed so min/max setting can be forgotten if used. 
		 */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	mddev->curr_resync = 0;
	spin_unlock(&mddev->lock);

	wake_up(&resync_wait);
	md_wakeup_thread(mddev->thread);
	return;
}
EXPORT_SYMBOL_GPL(md_do_sync);

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this)
{
	struct md_rdev *rdev;
	int spares = 0;
	int removed = 0;
	bool remove_some = false;

	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		/* Mustn't remove devices when resync thread is running */
		return 0;

	rdev_for_each(rdev, mddev) {
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    test_bit(Faulty, &rdev->flags) &&
		    atomic_read(&rdev->nr_pending)==0) {
			/* Faulty non-Blocked devices with nr_pending == 0
			 * never get nr_pending incremented,
			 * never get Faulty cleared, and never get Blocked set.
			 * So we can synchronize_rcu now rather than once per device
			 */
			remove_some = true;
			set_bit(RemoveSynchronized, &rdev->flags);
		}
	}

	if (remove_some)
		synchronize_rcu();
	rdev_for_each(rdev, mddev) {
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    ((test_bit(RemoveSynchronized, &rdev->flags) ||
		     (!test_bit(In_sync, &rdev->flags) &&
		      !test_bit(Journal, &rdev->flags))) &&
		     atomic_read(&rdev->nr_pending)==0)) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev) == 0) {
				sysfs_unlink_rdev(mddev, rdev);
				rdev->saved_raid_disk = rdev->raid_disk;
				rdev->raid_disk = -1;
				removed++;
			}
		}
		if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
			clear_bit(RemoveSynchronized, &rdev->flags);
	}

	if (removed && mddev->kobj.sd)
		sysfs_notify(&mddev->kobj, NULL, "degraded");

	if (this && removed)
		goto no_add;

	rdev_for_each(rdev, mddev) {
		if (this && this != rdev)
			continue;
		if (test_bit(Candidate, &rdev->flags))
			continue;
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Journal, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0)
			continue;
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(Journal, &rdev->flags)) {
			if (mddev->ro &&
			    ! (rdev->saved_raid_disk >= 0 &&
			       !test_bit(Bitmap_sync, &rdev->flags)))
				continue;

			rdev->recovery_offset = 0;
		}
		if (mddev->pers->
		    hot_add_disk(mddev, rdev) == 0) {
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;
			if (!test_bit(Journal, &rdev->flags))
				spares++;
			md_new_event(mddev);
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
no_add:
	if (removed)
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	return spares;
}

static void md_start_sync(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	mddev->sync_thread = md_register_thread(md_do_sync,
						mddev,
						"resync");
	if (!mddev->sync_thread) {
		pr_warn("%s: could not start resync thread...\n",
			mdname(mddev));
		/* leave the spares where they are, it shouldn't hurt */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		wake_up(&resync_wait);
		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
				       &mddev->recovery))
			if (mddev->sysfs_action)
				sysfs_notify_dirent_safe(mddev->sysfs_action);
	} else
		md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(struct mddev *mddev)
{
	if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
		/* Write superblock - thread that called mddev_suspend()
		 * holds reconfig_mutex for us.
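		 * The MD_UPDATING_SB bit and the barrier below pair with
		 * that thread: it clears MD_ALLOW_SB_UPDATE before waiting
		 * on sb_wait, so re-testing MD_ALLOW_SB_UPDATE after
		 * smp_mb__after_atomic() tells us whether writing the
		 * superblock is still permitted.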
		 */
		set_bit(MD_UPDATING_SB, &mddev->flags);
		smp_mb__after_atomic();
		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
			md_update_sb(mddev, 0);
		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
		wake_up(&mddev->sb_wait);
	}

	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		md_bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			pr_debug("md: %s in immediate safe mode\n",
				 mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;
		bool try_set_sync = mddev->safemode != 0;

		if (!mddev->external && mddev->safemode == 1)
			mddev->safemode = 0;

		if (mddev->ro) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)
				/* 'Blocked' flag not needed as failed devices
				 * will be recorded if array switched to read/write.
				 * Leaving it set will prevent the device
				 * from being removed.
				 */
				rdev_for_each(rdev, mddev)
					clear_bit(Blocked, &rdev->flags);
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			remove_and_add_spares(mddev, NULL);
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			goto unlock;
		}

		if (mddev_is_clustered(mddev)) {
			struct md_rdev *rdev;
			/* kick the device if another node issued a
			 * remove disk.
			 */
			rdev_for_each(rdev, mddev) {
				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
						rdev->raid_disk < 0)
					md_kick_rdev_from_array(rdev);
			}
		}

		if (try_set_sync && !mddev->external && !mddev->in_sync) {
			spin_lock(&mddev->lock);
			set_in_sync(mddev);
			spin_unlock(&mddev->lock);
		}

		if (mddev->sb_flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
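		 * ("sync_action" is computed from these bits, so clearing
		 * NEEDED first could let a reader briefly see "idle" while
		 * a sync is in fact being started.)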
		 */
		mddev->curr_resync_completed = 0;
		spin_lock(&mddev->lock);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		spin_unlock(&mddev->lock);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto not_running;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto not_running;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto not_running;

		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				md_bitmap_write_all(mddev->bitmap);
			}
			INIT_WORK(&mddev->del_work, md_start_sync);
			queue_work(md_misc_wq, &mddev->del_work);
			goto unlock;
		}
	not_running:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			wake_up(&resync_wait);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
	unlock:
		wake_up(&mddev->sb_wait);
		mddev_unlock(mddev);
	}
}
EXPORT_SYMBOL(md_check_recovery);

void md_reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;
	sector_t old_dev_sectors = mddev->dev_sectors;
	bool is_reshaped = false;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    mddev->degraded != mddev->raid_disks) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev)) {
			sysfs_notify(&mddev->kobj, NULL,
				     "degraded");
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape) {
		mddev->pers->finish_reshape(mddev);
		if (mddev_is_clustered(mddev))
			is_reshaped = true;
	}

	/* If array is no-longer degraded, then any saved_raid_disk
	 * information must be scrapped.
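	 * (saved_raid_disk only exists so a re-added device can go back
	 * to its former slot; once recovery is complete there is no
	 * former slot left to restore.)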
	 */
	if (!mddev->degraded)
		rdev_for_each(rdev, mddev)
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
	 * clustered raid */
	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
		md_cluster_ops->resync_finish(mddev);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	/*
	 * We call md_cluster_ops->update_size here because sync_size could
	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
	 * so it is time to update size across cluster.
	 */
	if (mddev_is_clustered(mddev) && is_reshaped
				      && !test_bit(MD_CLOSING, &mddev->flags))
		md_cluster_ops->update_size(mddev, old_dev_sectors);
	wake_up(&resync_wait);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

void md_finish_reshape(struct mddev *mddev)
{
	/* Called by the personality module when a reshape completes.
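	 * A reshape may have moved the data area of each device, and the
	 * loop below folds that move into ->sectors.  Illustrative
	 * example: if data_offset was 2048 and new_data_offset is 1024,
	 * the data area gained 1024 sectors, so ->sectors grows by that
	 * amount; a move in the other direction shrinks it instead.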
	 */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management */

/* Returns 1 on success, 0 on failure */
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	struct mddev *mddev = rdev->mddev;
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
	if (rv == 0) {
		/* Make sure they get written out promptly */
		if (test_bit(ExternalBbl, &rdev->flags))
			sysfs_notify(&rdev->kobj, NULL,
				     "unacknowledged_bad_blocks");
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(rdev->mddev->thread);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_clear(&rdev->badblocks, s, sectors);
	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
		sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	struct md_rdev *rdev2;
	int role, ret;
	char b[BDEVNAME_SIZE];

	/*
	 * If size is changed in another node then we need to
	 * do resize as well.
	 */
	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
		if (ret)
			pr_info("md-cluster: resize failed\n");
		else
			md_bitmap_update_sb(mddev->bitmap);
	}

	/* Check for change of roles in the active devices */
	rdev_for_each(rdev2, mddev) {
		if (test_bit(Faulty, &rdev2->flags))
			continue;

		/* Check if the roles changed */
		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);

		if (test_bit(Candidate, &rdev2->flags)) {
			if (role == 0xfffe) {
				pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
				md_kick_rdev_from_array(rdev2);
				continue;
			}
			else
				clear_bit(Candidate, &rdev2->flags);
		}

		if (role != rdev2->raid_disk) {
			/*
			 * The device got activated, unless a reshape is
			 * happening.
			 */
			if (rdev2->raid_disk == -1 && role != 0xffff &&
			    !(le32_to_cpu(sb->feature_map) &
			      MD_FEATURE_RESHAPE_ACTIVE)) {
				rdev2->saved_raid_disk = role;
				ret = remove_and_add_spares(mddev, rdev2);
				pr_info("Activated spare: %s\n",
					bdevname(rdev2->bdev,b));
				/* wake up mddev->thread here, so the array can
				 * perform a resync with the newly activated disk */
				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
				md_wakeup_thread(mddev->thread);
			}
			/* device faulty
			 * We just want to do the minimum to mark the disk
			 * as faulty. The recovery is performed by the
			 * one who initiated the error.
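			 * (0xfffe is the on-disk 'faulty' role and 0xfffd
			 * the 'journal' role; clearing Blocked afterwards
			 * ensures the failed device can later be removed.)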
			 */
			if ((role == 0xfffe) || (role == 0xfffd)) {
				md_error(mddev, rdev2);
				clear_bit(Blocked, &rdev2->flags);
			}
		}
	}

	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));

	/*
	 * Since mddev->delta_disks has already been updated in
	 * update_raid_disks, it is time to check the reshape.
	 */
	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/*
		 * reshape is happening in the remote node, we need to
		 * update reshape_position and call start_reshape.
		 */
		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
		if (mddev->pers->start_reshape)
			mddev->pers->start_reshape(mddev);
	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
		   mddev->reshape_position != MaxSector &&
		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/* reshape is just done in another node. */
		mddev->reshape_position = MaxSector;
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
	}

	/* Finally set the event to be up to date */
	mddev->events = le64_to_cpu(sb->events);
}

static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	int err;
	struct page *swapout = rdev->sb_page;
	struct mdp_superblock_1 *sb;

	/* Store the sb page of the rdev in the swapout temporary
	 * variable in case we err in the future
	 */
	rdev->sb_page = NULL;
	err = alloc_disk_sb(rdev);
	if (err == 0) {
		ClearPageUptodate(rdev->sb_page);
		rdev->sb_loaded = 0;
		err = super_types[mddev->major_version].
			load_super(rdev, NULL, mddev->minor_version);
	}
	if (err < 0) {
		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
			__func__, __LINE__, rdev->desc_nr, err);
		if (rdev->sb_page)
			put_page(rdev->sb_page);
		rdev->sb_page = swapout;
		rdev->sb_loaded = 1;
		return err;
	}

	sb = page_address(rdev->sb_page);
	/* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
	 * is not set
	 */

	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

	/* The other node finished recovery, call spare_active to set
	 * device In_sync and mddev->degraded
	 */
	if (rdev->recovery_offset == MaxSector &&
	    !test_bit(In_sync, &rdev->flags) &&
	    mddev->pers->spare_active(mddev))
		sysfs_notify(&mddev->kobj, NULL, "degraded");

	put_page(swapout);
	return 0;
}

void md_reload_sb(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;
	int err;

	/* Find the rdev */
	rdev_for_each_rcu(rdev, mddev) {
		if (rdev->desc_nr == nr)
			break;
	}

	if (!rdev || rdev->desc_nr != nr) {
		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
		return;
	}

	err = read_rdev(mddev, rdev);
	if (err < 0)
		return;

	check_sb_changes(mddev, rdev);

	/* Read all rdev's to update recovery_offset */
	rdev_for_each_rcu(rdev, mddev) {
		if (!test_bit(Faulty, &rdev->flags))
			read_rdev(mddev, rdev);
	}
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		mutex_lock(&detected_devices_mutex);
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
		mutex_unlock(&detected_devices_mutex);
	}
}

static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	pr_info("md: Autodetecting RAID arrays.\n");

	mutex_lock(&detected_devices_mutex);
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		mutex_unlock(&detected_devices_mutex);
		rdev = md_import_device(dev,0, 90);
		mutex_lock(&detected_devices_mutex);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}
	mutex_unlock(&detected_devices_mutex);

	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;
	int delay = 1;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the modules while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
		delay += delay;
	}
	remove_proc_entry("mdstat", NULL);

	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->ctime = 0;
		mddev->hold_active = 0;
		/*
		 * for_each_mddev() will call mddev_put() at the end of each
		 * iteration.  As the mddev is now fully clear, this will
		 * schedule the mddev for destruction by a workqueue, and the
		 * destroy_workqueue() below will wait for that to complete.
		 */
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);