// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
        or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
        adding a device to an array when it has incompatible metadata
     pr_info() for every interesting, very rare event, like an array starting
        or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed.
Note that 105 * the RAID driver will use the maximum available bandwidth if the IO 106 * subsystem is idle. There is also an 'absolute maximum' reconstruction 107 * speed limit - in case reconstruction slows down your system despite 108 * idle IO detection. 109 * 110 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 111 * or /sys/block/mdX/md/sync_speed_{min,max} 112 */ 113 114 static int sysctl_speed_limit_min = 1000; 115 static int sysctl_speed_limit_max = 200000; 116 static inline int speed_min(struct mddev *mddev) 117 { 118 return mddev->sync_speed_min ? 119 mddev->sync_speed_min : sysctl_speed_limit_min; 120 } 121 122 static inline int speed_max(struct mddev *mddev) 123 { 124 return mddev->sync_speed_max ? 125 mddev->sync_speed_max : sysctl_speed_limit_max; 126 } 127 128 static int rdev_init_wb(struct md_rdev *rdev) 129 { 130 if (rdev->bdev->bd_queue->nr_hw_queues == 1) 131 return 0; 132 133 spin_lock_init(&rdev->wb_list_lock); 134 INIT_LIST_HEAD(&rdev->wb_list); 135 init_waitqueue_head(&rdev->wb_io_wait); 136 set_bit(WBCollisionCheck, &rdev->flags); 137 138 return 1; 139 } 140 141 /* 142 * Create wb_info_pool if rdev is the first multi-queue device flaged 143 * with writemostly, also write-behind mode is enabled. 144 */ 145 void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, 146 bool is_suspend) 147 { 148 if (mddev->bitmap_info.max_write_behind == 0) 149 return; 150 151 if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev)) 152 return; 153 154 if (mddev->wb_info_pool == NULL) { 155 unsigned int noio_flag; 156 157 if (!is_suspend) 158 mddev_suspend(mddev); 159 noio_flag = memalloc_noio_save(); 160 mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS, 161 sizeof(struct wb_info)); 162 memalloc_noio_restore(noio_flag); 163 if (!mddev->wb_info_pool) 164 pr_err("can't alloc memory pool for writemostly\n"); 165 if (!is_suspend) 166 mddev_resume(mddev); 167 } 168 } 169 EXPORT_SYMBOL_GPL(mddev_create_wb_pool); 170 171 /* 172 * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck. 173 */ 174 static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev) 175 { 176 if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags)) 177 return; 178 179 if (mddev->wb_info_pool) { 180 struct md_rdev *temp; 181 int num = 0; 182 183 /* 184 * Check if other rdevs need wb_info_pool. 
185 */ 186 rdev_for_each(temp, mddev) 187 if (temp != rdev && 188 test_bit(WBCollisionCheck, &temp->flags)) 189 num++; 190 if (!num) { 191 mddev_suspend(rdev->mddev); 192 mempool_destroy(mddev->wb_info_pool); 193 mddev->wb_info_pool = NULL; 194 mddev_resume(rdev->mddev); 195 } 196 } 197 } 198 199 static struct ctl_table_header *raid_table_header; 200 201 static struct ctl_table raid_table[] = { 202 { 203 .procname = "speed_limit_min", 204 .data = &sysctl_speed_limit_min, 205 .maxlen = sizeof(int), 206 .mode = S_IRUGO|S_IWUSR, 207 .proc_handler = proc_dointvec, 208 }, 209 { 210 .procname = "speed_limit_max", 211 .data = &sysctl_speed_limit_max, 212 .maxlen = sizeof(int), 213 .mode = S_IRUGO|S_IWUSR, 214 .proc_handler = proc_dointvec, 215 }, 216 { } 217 }; 218 219 static struct ctl_table raid_dir_table[] = { 220 { 221 .procname = "raid", 222 .maxlen = 0, 223 .mode = S_IRUGO|S_IXUGO, 224 .child = raid_table, 225 }, 226 { } 227 }; 228 229 static struct ctl_table raid_root_table[] = { 230 { 231 .procname = "dev", 232 .maxlen = 0, 233 .mode = 0555, 234 .child = raid_dir_table, 235 }, 236 { } 237 }; 238 239 static const struct block_device_operations md_fops; 240 241 static int start_readonly; 242 243 /* 244 * The original mechanism for creating an md device is to create 245 * a device node in /dev and to open it. This causes races with device-close. 246 * The preferred method is to write to the "new_array" module parameter. 247 * This can avoid races. 248 * Setting create_on_open to false disables the original mechanism 249 * so all the races disappear. 250 */ 251 static bool create_on_open = true; 252 253 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 254 struct mddev *mddev) 255 { 256 if (!mddev || !bioset_initialized(&mddev->bio_set)) 257 return bio_alloc(gfp_mask, nr_iovecs); 258 259 return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); 260 } 261 EXPORT_SYMBOL_GPL(bio_alloc_mddev); 262 263 static struct bio *md_bio_alloc_sync(struct mddev *mddev) 264 { 265 if (!mddev || !bioset_initialized(&mddev->sync_set)) 266 return bio_alloc(GFP_NOIO, 1); 267 268 return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); 269 } 270 271 /* 272 * We have a system wide 'event count' that is incremented 273 * on any 'interesting' event, and readers of /proc/mdstat 274 * can use 'poll' or 'select' to find out when the event 275 * count increases. 276 * 277 * Events are: 278 * start array, stop array, error, add device, remove device, 279 * start build, activate spare 280 */ 281 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 282 static atomic_t md_event_count; 283 void md_new_event(struct mddev *mddev) 284 { 285 atomic_inc(&md_event_count); 286 wake_up(&md_event_waiters); 287 } 288 EXPORT_SYMBOL_GPL(md_new_event); 289 290 /* 291 * Enables to iterate over all existing md arrays 292 * all_mddevs_lock protects this list. 293 */ 294 static LIST_HEAD(all_mddevs); 295 static DEFINE_SPINLOCK(all_mddevs_lock); 296 297 /* 298 * iterates through all used mddevs in the system. 299 * We take care to grab the all_mddevs_lock whenever navigating 300 * the list, and to always hold a refcount when unlocked. 301 * Any code which breaks out of this loop while own 302 * a reference to the current mddev and must mddev_put it. 
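 *
 * A minimal usage sketch (hypothetical caller; users elsewhere in this file,
 * such as md_exit(), walk the list the same way). do_something() and
 * should_stop() are made-up helpers, everything else is the real API.
 * Breaking out of the loop early leaves a reference held which the caller
 * must drop itself:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		do_something(mddev);		// hypothetical per-array work
 *		if (should_stop(mddev)) {	// hypothetical predicate
 *			mddev_put(mddev);	// we still own a reference here
 *			break;
 *		}
 *	}
 *	// on normal loop exit the last reference has already been dropped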
303 */ 304 #define for_each_mddev(_mddev,_tmp) \ 305 \ 306 for (({ spin_lock(&all_mddevs_lock); \ 307 _tmp = all_mddevs.next; \ 308 _mddev = NULL;}); \ 309 ({ if (_tmp != &all_mddevs) \ 310 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\ 311 spin_unlock(&all_mddevs_lock); \ 312 if (_mddev) mddev_put(_mddev); \ 313 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \ 314 _tmp != &all_mddevs;}); \ 315 ({ spin_lock(&all_mddevs_lock); \ 316 _tmp = _tmp->next;}) \ 317 ) 318 319 /* Rather than calling directly into the personality make_request function, 320 * IO requests come here first so that we can check if the device is 321 * being suspended pending a reconfiguration. 322 * We hold a refcount over the call to ->make_request. By the time that 323 * call has finished, the bio has been linked into some internal structure 324 * and so is visible to ->quiesce(), so we don't need the refcount any more. 325 */ 326 static bool is_suspended(struct mddev *mddev, struct bio *bio) 327 { 328 if (mddev->suspended) 329 return true; 330 if (bio_data_dir(bio) != WRITE) 331 return false; 332 if (mddev->suspend_lo >= mddev->suspend_hi) 333 return false; 334 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) 335 return false; 336 if (bio_end_sector(bio) < mddev->suspend_lo) 337 return false; 338 return true; 339 } 340 341 void md_handle_request(struct mddev *mddev, struct bio *bio) 342 { 343 check_suspended: 344 rcu_read_lock(); 345 if (is_suspended(mddev, bio)) { 346 DEFINE_WAIT(__wait); 347 for (;;) { 348 prepare_to_wait(&mddev->sb_wait, &__wait, 349 TASK_UNINTERRUPTIBLE); 350 if (!is_suspended(mddev, bio)) 351 break; 352 rcu_read_unlock(); 353 schedule(); 354 rcu_read_lock(); 355 } 356 finish_wait(&mddev->sb_wait, &__wait); 357 } 358 atomic_inc(&mddev->active_io); 359 rcu_read_unlock(); 360 361 if (!mddev->pers->make_request(mddev, bio)) { 362 atomic_dec(&mddev->active_io); 363 wake_up(&mddev->sb_wait); 364 goto check_suspended; 365 } 366 367 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 368 wake_up(&mddev->sb_wait); 369 } 370 EXPORT_SYMBOL(md_handle_request); 371 372 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) 373 { 374 const int rw = bio_data_dir(bio); 375 const int sgrp = op_stat_group(bio_op(bio)); 376 struct mddev *mddev = q->queuedata; 377 unsigned int sectors; 378 379 blk_queue_split(q, &bio); 380 381 if (mddev == NULL || mddev->pers == NULL) { 382 bio_io_error(bio); 383 return BLK_QC_T_NONE; 384 } 385 if (mddev->ro == 1 && unlikely(rw == WRITE)) { 386 if (bio_sectors(bio) != 0) 387 bio->bi_status = BLK_STS_IOERR; 388 bio_endio(bio); 389 return BLK_QC_T_NONE; 390 } 391 392 /* 393 * save the sectors now since our bio can 394 * go away inside make_request 395 */ 396 sectors = bio_sectors(bio); 397 /* bio could be mergeable after passing to underlayer */ 398 bio->bi_opf &= ~REQ_NOMERGE; 399 400 md_handle_request(mddev, bio); 401 402 part_stat_lock(); 403 part_stat_inc(&mddev->gendisk->part0, ios[sgrp]); 404 part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors); 405 part_stat_unlock(); 406 407 return BLK_QC_T_NONE; 408 } 409 410 /* mddev_suspend makes sure no new requests are submitted 411 * to the device, and that any requests that have been submitted 412 * are completely handled. 413 * Once mddev_detach() is called and completes, the module will be 414 * completely unused. 
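 *
 * A hedged usage sketch (hypothetical caller): suspend/resume calls nest and
 * must be balanced, and both expect ->reconfig_mutex to be held, which the
 * lockdep assertions below enforce. reconfigure_something() is a made-up
 * placeholder for whatever needs the array quiesced:
 *
 *	if (mddev_lock(mddev) == 0) {		// takes ->reconfig_mutex
 *		mddev_suspend(mddev);
 *		reconfigure_something(mddev);	// hypothetical quiesced work
 *		mddev_resume(mddev);
 *		mddev_unlock(mddev);
 *	}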
415 */ 416 void mddev_suspend(struct mddev *mddev) 417 { 418 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); 419 lockdep_assert_held(&mddev->reconfig_mutex); 420 if (mddev->suspended++) 421 return; 422 synchronize_rcu(); 423 wake_up(&mddev->sb_wait); 424 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); 425 smp_mb__after_atomic(); 426 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 427 mddev->pers->quiesce(mddev, 1); 428 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); 429 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); 430 431 del_timer_sync(&mddev->safemode_timer); 432 } 433 EXPORT_SYMBOL_GPL(mddev_suspend); 434 435 void mddev_resume(struct mddev *mddev) 436 { 437 lockdep_assert_held(&mddev->reconfig_mutex); 438 if (--mddev->suspended) 439 return; 440 wake_up(&mddev->sb_wait); 441 mddev->pers->quiesce(mddev, 0); 442 443 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 444 md_wakeup_thread(mddev->thread); 445 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 446 } 447 EXPORT_SYMBOL_GPL(mddev_resume); 448 449 int mddev_congested(struct mddev *mddev, int bits) 450 { 451 struct md_personality *pers = mddev->pers; 452 int ret = 0; 453 454 rcu_read_lock(); 455 if (mddev->suspended) 456 ret = 1; 457 else if (pers && pers->congested) 458 ret = pers->congested(mddev, bits); 459 rcu_read_unlock(); 460 return ret; 461 } 462 EXPORT_SYMBOL_GPL(mddev_congested); 463 static int md_congested(void *data, int bits) 464 { 465 struct mddev *mddev = data; 466 return mddev_congested(mddev, bits); 467 } 468 469 /* 470 * Generic flush handling for md 471 */ 472 473 static void md_end_flush(struct bio *bio) 474 { 475 struct md_rdev *rdev = bio->bi_private; 476 struct mddev *mddev = rdev->mddev; 477 478 rdev_dec_pending(rdev, mddev); 479 480 if (atomic_dec_and_test(&mddev->flush_pending)) { 481 /* The pre-request flush has finished */ 482 queue_work(md_wq, &mddev->flush_work); 483 } 484 bio_put(bio); 485 } 486 487 static void md_submit_flush_data(struct work_struct *ws); 488 489 static void submit_flushes(struct work_struct *ws) 490 { 491 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 492 struct md_rdev *rdev; 493 494 mddev->start_flush = ktime_get_boottime(); 495 INIT_WORK(&mddev->flush_work, md_submit_flush_data); 496 atomic_set(&mddev->flush_pending, 1); 497 rcu_read_lock(); 498 rdev_for_each_rcu(rdev, mddev) 499 if (rdev->raid_disk >= 0 && 500 !test_bit(Faulty, &rdev->flags)) { 501 /* Take two references, one is dropped 502 * when request finishes, one after 503 * we reclaim rcu_read_lock 504 */ 505 struct bio *bi; 506 atomic_inc(&rdev->nr_pending); 507 atomic_inc(&rdev->nr_pending); 508 rcu_read_unlock(); 509 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); 510 bi->bi_end_io = md_end_flush; 511 bi->bi_private = rdev; 512 bio_set_dev(bi, rdev->bdev); 513 bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 514 atomic_inc(&mddev->flush_pending); 515 submit_bio(bi); 516 rcu_read_lock(); 517 rdev_dec_pending(rdev, mddev); 518 } 519 rcu_read_unlock(); 520 if (atomic_dec_and_test(&mddev->flush_pending)) 521 queue_work(md_wq, &mddev->flush_work); 522 } 523 524 static void md_submit_flush_data(struct work_struct *ws) 525 { 526 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 527 struct bio *bio = mddev->flush_bio; 528 529 /* 530 * must reset flush_bio before calling into md_handle_request to avoid a 531 * deadlock, because other bios passed md_handle_request suspend check 532 * could wait for this and below md_handle_request 
could wait for those 533 * bios because of suspend check 534 */ 535 mddev->last_flush = mddev->start_flush; 536 mddev->flush_bio = NULL; 537 wake_up(&mddev->sb_wait); 538 539 if (bio->bi_iter.bi_size == 0) { 540 /* an empty barrier - all done */ 541 bio_endio(bio); 542 } else { 543 bio->bi_opf &= ~REQ_PREFLUSH; 544 md_handle_request(mddev, bio); 545 } 546 } 547 548 void md_flush_request(struct mddev *mddev, struct bio *bio) 549 { 550 ktime_t start = ktime_get_boottime(); 551 spin_lock_irq(&mddev->lock); 552 wait_event_lock_irq(mddev->sb_wait, 553 !mddev->flush_bio || 554 ktime_after(mddev->last_flush, start), 555 mddev->lock); 556 if (!ktime_after(mddev->last_flush, start)) { 557 WARN_ON(mddev->flush_bio); 558 mddev->flush_bio = bio; 559 bio = NULL; 560 } 561 spin_unlock_irq(&mddev->lock); 562 563 if (!bio) { 564 INIT_WORK(&mddev->flush_work, submit_flushes); 565 queue_work(md_wq, &mddev->flush_work); 566 } else { 567 /* flush was performed for some other bio while we waited. */ 568 if (bio->bi_iter.bi_size == 0) 569 /* an empty barrier - all done */ 570 bio_endio(bio); 571 else { 572 bio->bi_opf &= ~REQ_PREFLUSH; 573 mddev->pers->make_request(mddev, bio); 574 } 575 } 576 } 577 EXPORT_SYMBOL(md_flush_request); 578 579 static inline struct mddev *mddev_get(struct mddev *mddev) 580 { 581 atomic_inc(&mddev->active); 582 return mddev; 583 } 584 585 static void mddev_delayed_delete(struct work_struct *ws); 586 587 static void mddev_put(struct mddev *mddev) 588 { 589 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 590 return; 591 if (!mddev->raid_disks && list_empty(&mddev->disks) && 592 mddev->ctime == 0 && !mddev->hold_active) { 593 /* Array is not configured at all, and not held active, 594 * so destroy it */ 595 list_del_init(&mddev->all_mddevs); 596 597 /* 598 * Call queue_work inside the spinlock so that 599 * flush_workqueue() after mddev_find will succeed in waiting 600 * for the work to be done. 
601 */ 602 INIT_WORK(&mddev->del_work, mddev_delayed_delete); 603 queue_work(md_misc_wq, &mddev->del_work); 604 } 605 spin_unlock(&all_mddevs_lock); 606 } 607 608 static void md_safemode_timeout(struct timer_list *t); 609 610 void mddev_init(struct mddev *mddev) 611 { 612 kobject_init(&mddev->kobj, &md_ktype); 613 mutex_init(&mddev->open_mutex); 614 mutex_init(&mddev->reconfig_mutex); 615 mutex_init(&mddev->bitmap_info.mutex); 616 INIT_LIST_HEAD(&mddev->disks); 617 INIT_LIST_HEAD(&mddev->all_mddevs); 618 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); 619 atomic_set(&mddev->active, 1); 620 atomic_set(&mddev->openers, 0); 621 atomic_set(&mddev->active_io, 0); 622 spin_lock_init(&mddev->lock); 623 atomic_set(&mddev->flush_pending, 0); 624 init_waitqueue_head(&mddev->sb_wait); 625 init_waitqueue_head(&mddev->recovery_wait); 626 mddev->reshape_position = MaxSector; 627 mddev->reshape_backwards = 0; 628 mddev->last_sync_action = "none"; 629 mddev->resync_min = 0; 630 mddev->resync_max = MaxSector; 631 mddev->level = LEVEL_NONE; 632 } 633 EXPORT_SYMBOL_GPL(mddev_init); 634 635 static struct mddev *mddev_find(dev_t unit) 636 { 637 struct mddev *mddev, *new = NULL; 638 639 if (unit && MAJOR(unit) != MD_MAJOR) 640 unit &= ~((1<<MdpMinorShift)-1); 641 642 retry: 643 spin_lock(&all_mddevs_lock); 644 645 if (unit) { 646 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 647 if (mddev->unit == unit) { 648 mddev_get(mddev); 649 spin_unlock(&all_mddevs_lock); 650 kfree(new); 651 return mddev; 652 } 653 654 if (new) { 655 list_add(&new->all_mddevs, &all_mddevs); 656 spin_unlock(&all_mddevs_lock); 657 new->hold_active = UNTIL_IOCTL; 658 return new; 659 } 660 } else if (new) { 661 /* find an unused unit number */ 662 static int next_minor = 512; 663 int start = next_minor; 664 int is_free = 0; 665 int dev = 0; 666 while (!is_free) { 667 dev = MKDEV(MD_MAJOR, next_minor); 668 next_minor++; 669 if (next_minor > MINORMASK) 670 next_minor = 0; 671 if (next_minor == start) { 672 /* Oh dear, all in use. */ 673 spin_unlock(&all_mddevs_lock); 674 kfree(new); 675 return NULL; 676 } 677 678 is_free = 1; 679 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 680 if (mddev->unit == dev) { 681 is_free = 0; 682 break; 683 } 684 } 685 new->unit = dev; 686 new->md_minor = MINOR(dev); 687 new->hold_active = UNTIL_STOP; 688 list_add(&new->all_mddevs, &all_mddevs); 689 spin_unlock(&all_mddevs_lock); 690 return new; 691 } 692 spin_unlock(&all_mddevs_lock); 693 694 new = kzalloc(sizeof(*new), GFP_KERNEL); 695 if (!new) 696 return NULL; 697 698 new->unit = unit; 699 if (MAJOR(unit) == MD_MAJOR) 700 new->md_minor = MINOR(unit); 701 else 702 new->md_minor = MINOR(unit) >> MdpMinorShift; 703 704 mddev_init(new); 705 706 goto retry; 707 } 708 709 static struct attribute_group md_redundancy_group; 710 711 void mddev_unlock(struct mddev *mddev) 712 { 713 if (mddev->to_remove) { 714 /* These cannot be removed under reconfig_mutex as 715 * an access to the files will try to take reconfig_mutex 716 * while holding the file unremovable, which leads to 717 * a deadlock. 718 * So hold set sysfs_active while the remove in happeing, 719 * and anything else which might set ->to_remove or my 720 * otherwise change the sysfs namespace will fail with 721 * -EBUSY if sysfs_active is still set. 722 * We set sysfs_active under reconfig_mutex and elsewhere 723 * test it under the same mutex to ensure its correct value 724 * is seen. 
725 */ 726 struct attribute_group *to_remove = mddev->to_remove; 727 mddev->to_remove = NULL; 728 mddev->sysfs_active = 1; 729 mutex_unlock(&mddev->reconfig_mutex); 730 731 if (mddev->kobj.sd) { 732 if (to_remove != &md_redundancy_group) 733 sysfs_remove_group(&mddev->kobj, to_remove); 734 if (mddev->pers == NULL || 735 mddev->pers->sync_request == NULL) { 736 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 737 if (mddev->sysfs_action) 738 sysfs_put(mddev->sysfs_action); 739 mddev->sysfs_action = NULL; 740 } 741 } 742 mddev->sysfs_active = 0; 743 } else 744 mutex_unlock(&mddev->reconfig_mutex); 745 746 /* As we've dropped the mutex we need a spinlock to 747 * make sure the thread doesn't disappear 748 */ 749 spin_lock(&pers_lock); 750 md_wakeup_thread(mddev->thread); 751 wake_up(&mddev->sb_wait); 752 spin_unlock(&pers_lock); 753 } 754 EXPORT_SYMBOL_GPL(mddev_unlock); 755 756 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) 757 { 758 struct md_rdev *rdev; 759 760 rdev_for_each_rcu(rdev, mddev) 761 if (rdev->desc_nr == nr) 762 return rdev; 763 764 return NULL; 765 } 766 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu); 767 768 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) 769 { 770 struct md_rdev *rdev; 771 772 rdev_for_each(rdev, mddev) 773 if (rdev->bdev->bd_dev == dev) 774 return rdev; 775 776 return NULL; 777 } 778 779 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) 780 { 781 struct md_rdev *rdev; 782 783 rdev_for_each_rcu(rdev, mddev) 784 if (rdev->bdev->bd_dev == dev) 785 return rdev; 786 787 return NULL; 788 } 789 EXPORT_SYMBOL_GPL(md_find_rdev_rcu); 790 791 static struct md_personality *find_pers(int level, char *clevel) 792 { 793 struct md_personality *pers; 794 list_for_each_entry(pers, &pers_list, list) { 795 if (level != LEVEL_NONE && pers->level == level) 796 return pers; 797 if (strcmp(pers->name, clevel)==0) 798 return pers; 799 } 800 return NULL; 801 } 802 803 /* return the offset of the super block in 512byte sectors */ 804 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) 805 { 806 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; 807 return MD_NEW_SIZE_SECTORS(num_sectors); 808 } 809 810 static int alloc_disk_sb(struct md_rdev *rdev) 811 { 812 rdev->sb_page = alloc_page(GFP_KERNEL); 813 if (!rdev->sb_page) 814 return -ENOMEM; 815 return 0; 816 } 817 818 void md_rdev_clear(struct md_rdev *rdev) 819 { 820 if (rdev->sb_page) { 821 put_page(rdev->sb_page); 822 rdev->sb_loaded = 0; 823 rdev->sb_page = NULL; 824 rdev->sb_start = 0; 825 rdev->sectors = 0; 826 } 827 if (rdev->bb_page) { 828 put_page(rdev->bb_page); 829 rdev->bb_page = NULL; 830 } 831 badblocks_exit(&rdev->badblocks); 832 } 833 EXPORT_SYMBOL_GPL(md_rdev_clear); 834 835 static void super_written(struct bio *bio) 836 { 837 struct md_rdev *rdev = bio->bi_private; 838 struct mddev *mddev = rdev->mddev; 839 840 if (bio->bi_status) { 841 pr_err("md: super_written gets error=%d\n", bio->bi_status); 842 md_error(mddev, rdev); 843 if (!test_bit(Faulty, &rdev->flags) 844 && (bio->bi_opf & MD_FAILFAST)) { 845 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); 846 set_bit(LastDev, &rdev->flags); 847 } 848 } else 849 clear_bit(LastDev, &rdev->flags); 850 851 if (atomic_dec_and_test(&mddev->pending_writes)) 852 wake_up(&mddev->sb_wait); 853 rdev_dec_pending(rdev, mddev); 854 bio_put(bio); 855 } 856 857 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, 858 sector_t sector, int size, struct page *page) 859 { 860 /* write first size bytes of page 
to sector of rdev 861 * Increment mddev->pending_writes before returning 862 * and decrement it on completion, waking up sb_wait 863 * if zero is reached. 864 * If an error occurred, call md_error 865 */ 866 struct bio *bio; 867 int ff = 0; 868 869 if (!page) 870 return; 871 872 if (test_bit(Faulty, &rdev->flags)) 873 return; 874 875 bio = md_bio_alloc_sync(mddev); 876 877 atomic_inc(&rdev->nr_pending); 878 879 bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev); 880 bio->bi_iter.bi_sector = sector; 881 bio_add_page(bio, page, size, 0); 882 bio->bi_private = rdev; 883 bio->bi_end_io = super_written; 884 885 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && 886 test_bit(FailFast, &rdev->flags) && 887 !test_bit(LastDev, &rdev->flags)) 888 ff = MD_FAILFAST; 889 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; 890 891 atomic_inc(&mddev->pending_writes); 892 submit_bio(bio); 893 } 894 895 int md_super_wait(struct mddev *mddev) 896 { 897 /* wait for all superblock writes that were scheduled to complete */ 898 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); 899 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) 900 return -EAGAIN; 901 return 0; 902 } 903 904 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 905 struct page *page, int op, int op_flags, bool metadata_op) 906 { 907 struct bio *bio = md_bio_alloc_sync(rdev->mddev); 908 int ret; 909 910 if (metadata_op && rdev->meta_bdev) 911 bio_set_dev(bio, rdev->meta_bdev); 912 else 913 bio_set_dev(bio, rdev->bdev); 914 bio_set_op_attrs(bio, op, op_flags); 915 if (metadata_op) 916 bio->bi_iter.bi_sector = sector + rdev->sb_start; 917 else if (rdev->mddev->reshape_position != MaxSector && 918 (rdev->mddev->reshape_backwards == 919 (sector >= rdev->mddev->reshape_position))) 920 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; 921 else 922 bio->bi_iter.bi_sector = sector + rdev->data_offset; 923 bio_add_page(bio, page, size, 0); 924 925 submit_bio_wait(bio); 926 927 ret = !bio->bi_status; 928 bio_put(bio); 929 return ret; 930 } 931 EXPORT_SYMBOL_GPL(sync_page_io); 932 933 static int read_disk_sb(struct md_rdev *rdev, int size) 934 { 935 char b[BDEVNAME_SIZE]; 936 937 if (rdev->sb_loaded) 938 return 0; 939 940 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) 941 goto fail; 942 rdev->sb_loaded = 1; 943 return 0; 944 945 fail: 946 pr_err("md: disabled device %s, could not read superblock.\n", 947 bdevname(rdev->bdev,b)); 948 return -EINVAL; 949 } 950 951 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 952 { 953 return sb1->set_uuid0 == sb2->set_uuid0 && 954 sb1->set_uuid1 == sb2->set_uuid1 && 955 sb1->set_uuid2 == sb2->set_uuid2 && 956 sb1->set_uuid3 == sb2->set_uuid3; 957 } 958 959 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 960 { 961 int ret; 962 mdp_super_t *tmp1, *tmp2; 963 964 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 965 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 966 967 if (!tmp1 || !tmp2) { 968 ret = 0; 969 goto abort; 970 } 971 972 *tmp1 = *sb1; 973 *tmp2 = *sb2; 974 975 /* 976 * nr_disks is not constant 977 */ 978 tmp1->nr_disks = 0; 979 tmp2->nr_disks = 0; 980 981 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); 982 abort: 983 kfree(tmp1); 984 kfree(tmp2); 985 return ret; 986 } 987 988 static u32 md_csum_fold(u32 csum) 989 { 990 csum = (csum & 0xffff) + (csum >> 16); 991 return (csum & 0xffff) + (csum >> 16); 992 } 993 994 static unsigned int calc_sb_csum(mdp_super_t *sb) 995 { 996 u64 
newcsum = 0; 997 u32 *sb32 = (u32*)sb; 998 int i; 999 unsigned int disk_csum, csum; 1000 1001 disk_csum = sb->sb_csum; 1002 sb->sb_csum = 0; 1003 1004 for (i = 0; i < MD_SB_BYTES/4 ; i++) 1005 newcsum += sb32[i]; 1006 csum = (newcsum & 0xffffffff) + (newcsum>>32); 1007 1008 #ifdef CONFIG_ALPHA 1009 /* This used to use csum_partial, which was wrong for several 1010 * reasons including that different results are returned on 1011 * different architectures. It isn't critical that we get exactly 1012 * the same return value as before (we always csum_fold before 1013 * testing, and that removes any differences). However as we 1014 * know that csum_partial always returned a 16bit value on 1015 * alphas, do a fold to maximise conformity to previous behaviour. 1016 */ 1017 sb->sb_csum = md_csum_fold(disk_csum); 1018 #else 1019 sb->sb_csum = disk_csum; 1020 #endif 1021 return csum; 1022 } 1023 1024 /* 1025 * Handle superblock details. 1026 * We want to be able to handle multiple superblock formats 1027 * so we have a common interface to them all, and an array of 1028 * different handlers. 1029 * We rely on user-space to write the initial superblock, and support 1030 * reading and updating of superblocks. 1031 * Interface methods are: 1032 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) 1033 * loads and validates a superblock on dev. 1034 * if refdev != NULL, compare superblocks on both devices 1035 * Return: 1036 * 0 - dev has a superblock that is compatible with refdev 1037 * 1 - dev has a superblock that is compatible and newer than refdev 1038 * so dev should be used as the refdev in future 1039 * -EINVAL superblock incompatible or invalid 1040 * -othererror e.g. -EIO 1041 * 1042 * int validate_super(struct mddev *mddev, struct md_rdev *dev) 1043 * Verify that dev is acceptable into mddev. 1044 * The first time, mddev->raid_disks will be 0, and data from 1045 * dev should be merged in. Subsequent calls check that dev 1046 * is new enough. Return 0 or -EINVAL 1047 * 1048 * void sync_super(struct mddev *mddev, struct md_rdev *dev) 1049 * Update the superblock for rdev with data in mddev 1050 * This does not write to disc. 1051 * 1052 */ 1053 1054 struct super_type { 1055 char *name; 1056 struct module *owner; 1057 int (*load_super)(struct md_rdev *rdev, 1058 struct md_rdev *refdev, 1059 int minor_version); 1060 int (*validate_super)(struct mddev *mddev, 1061 struct md_rdev *rdev); 1062 void (*sync_super)(struct mddev *mddev, 1063 struct md_rdev *rdev); 1064 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1065 sector_t num_sectors); 1066 int (*allow_new_offset)(struct md_rdev *rdev, 1067 unsigned long long new_offset); 1068 }; 1069 1070 /* 1071 * Check that the given mddev has no bitmap. 1072 * 1073 * This function is called from the run method of all personalities that do not 1074 * support bitmaps. It prints an error message and returns non-zero if mddev 1075 * has a bitmap. Otherwise, it returns 0. 
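 *
 * A minimal sketch of the expected call site: raid0 and linear call this
 * early in their run() methods; example_run() itself is hypothetical:
 *
 *	static int example_run(struct mddev *mddev)
 *	{
 *		if (md_check_no_bitmap(mddev))
 *			return -EINVAL;
 *		// ... personality-specific setup ...
 *		return 0;
 *	}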
1076 * 1077 */ 1078 int md_check_no_bitmap(struct mddev *mddev) 1079 { 1080 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) 1081 return 0; 1082 pr_warn("%s: bitmaps are not supported for %s\n", 1083 mdname(mddev), mddev->pers->name); 1084 return 1; 1085 } 1086 EXPORT_SYMBOL(md_check_no_bitmap); 1087 1088 /* 1089 * load_super for 0.90.0 1090 */ 1091 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1092 { 1093 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1094 mdp_super_t *sb; 1095 int ret; 1096 1097 /* 1098 * Calculate the position of the superblock (512byte sectors), 1099 * it's at the end of the disk. 1100 * 1101 * It also happens to be a multiple of 4Kb. 1102 */ 1103 rdev->sb_start = calc_dev_sboffset(rdev); 1104 1105 ret = read_disk_sb(rdev, MD_SB_BYTES); 1106 if (ret) 1107 return ret; 1108 1109 ret = -EINVAL; 1110 1111 bdevname(rdev->bdev, b); 1112 sb = page_address(rdev->sb_page); 1113 1114 if (sb->md_magic != MD_SB_MAGIC) { 1115 pr_warn("md: invalid raid superblock magic on %s\n", b); 1116 goto abort; 1117 } 1118 1119 if (sb->major_version != 0 || 1120 sb->minor_version < 90 || 1121 sb->minor_version > 91) { 1122 pr_warn("Bad version number %d.%d on %s\n", 1123 sb->major_version, sb->minor_version, b); 1124 goto abort; 1125 } 1126 1127 if (sb->raid_disks <= 0) 1128 goto abort; 1129 1130 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 1131 pr_warn("md: invalid superblock checksum on %s\n", b); 1132 goto abort; 1133 } 1134 1135 rdev->preferred_minor = sb->md_minor; 1136 rdev->data_offset = 0; 1137 rdev->new_data_offset = 0; 1138 rdev->sb_size = MD_SB_BYTES; 1139 rdev->badblocks.shift = -1; 1140 1141 if (sb->level == LEVEL_MULTIPATH) 1142 rdev->desc_nr = -1; 1143 else 1144 rdev->desc_nr = sb->this_disk.number; 1145 1146 if (!refdev) { 1147 ret = 1; 1148 } else { 1149 __u64 ev1, ev2; 1150 mdp_super_t *refsb = page_address(refdev->sb_page); 1151 if (!md_uuid_equal(refsb, sb)) { 1152 pr_warn("md: %s has different UUID to %s\n", 1153 b, bdevname(refdev->bdev,b2)); 1154 goto abort; 1155 } 1156 if (!md_sb_equal(refsb, sb)) { 1157 pr_warn("md: %s has same UUID but different superblock to %s\n", 1158 b, bdevname(refdev->bdev, b2)); 1159 goto abort; 1160 } 1161 ev1 = md_event(sb); 1162 ev2 = md_event(refsb); 1163 if (ev1 > ev2) 1164 ret = 1; 1165 else 1166 ret = 0; 1167 } 1168 rdev->sectors = rdev->sb_start; 1169 /* Limit to 4TB as metadata cannot record more than that. 1170 * (not needed for Linear and RAID0 as metadata doesn't 1171 * record this size) 1172 */ 1173 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) 1174 rdev->sectors = (sector_t)(2ULL << 32) - 2; 1175 1176 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1177 /* "this cannot possibly happen" ... 
*/ 1178 ret = -EINVAL; 1179 1180 abort: 1181 return ret; 1182 } 1183 1184 /* 1185 * validate_super for 0.90.0 1186 */ 1187 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) 1188 { 1189 mdp_disk_t *desc; 1190 mdp_super_t *sb = page_address(rdev->sb_page); 1191 __u64 ev1 = md_event(sb); 1192 1193 rdev->raid_disk = -1; 1194 clear_bit(Faulty, &rdev->flags); 1195 clear_bit(In_sync, &rdev->flags); 1196 clear_bit(Bitmap_sync, &rdev->flags); 1197 clear_bit(WriteMostly, &rdev->flags); 1198 1199 if (mddev->raid_disks == 0) { 1200 mddev->major_version = 0; 1201 mddev->minor_version = sb->minor_version; 1202 mddev->patch_version = sb->patch_version; 1203 mddev->external = 0; 1204 mddev->chunk_sectors = sb->chunk_size >> 9; 1205 mddev->ctime = sb->ctime; 1206 mddev->utime = sb->utime; 1207 mddev->level = sb->level; 1208 mddev->clevel[0] = 0; 1209 mddev->layout = sb->layout; 1210 mddev->raid_disks = sb->raid_disks; 1211 mddev->dev_sectors = ((sector_t)sb->size) * 2; 1212 mddev->events = ev1; 1213 mddev->bitmap_info.offset = 0; 1214 mddev->bitmap_info.space = 0; 1215 /* bitmap can use 60 K after the 4K superblocks */ 1216 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1217 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 1218 mddev->reshape_backwards = 0; 1219 1220 if (mddev->minor_version >= 91) { 1221 mddev->reshape_position = sb->reshape_position; 1222 mddev->delta_disks = sb->delta_disks; 1223 mddev->new_level = sb->new_level; 1224 mddev->new_layout = sb->new_layout; 1225 mddev->new_chunk_sectors = sb->new_chunk >> 9; 1226 if (mddev->delta_disks < 0) 1227 mddev->reshape_backwards = 1; 1228 } else { 1229 mddev->reshape_position = MaxSector; 1230 mddev->delta_disks = 0; 1231 mddev->new_level = mddev->level; 1232 mddev->new_layout = mddev->layout; 1233 mddev->new_chunk_sectors = mddev->chunk_sectors; 1234 } 1235 1236 if (sb->state & (1<<MD_SB_CLEAN)) 1237 mddev->recovery_cp = MaxSector; 1238 else { 1239 if (sb->events_hi == sb->cp_events_hi && 1240 sb->events_lo == sb->cp_events_lo) { 1241 mddev->recovery_cp = sb->recovery_cp; 1242 } else 1243 mddev->recovery_cp = 0; 1244 } 1245 1246 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 1247 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 1248 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 1249 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 1250 1251 mddev->max_disks = MD_SB_DISKS; 1252 1253 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 1254 mddev->bitmap_info.file == NULL) { 1255 mddev->bitmap_info.offset = 1256 mddev->bitmap_info.default_offset; 1257 mddev->bitmap_info.space = 1258 mddev->bitmap_info.default_space; 1259 } 1260 1261 } else if (mddev->pers == NULL) { 1262 /* Insist on good event counter while assembling, except 1263 * for spares (which don't need an event count) */ 1264 ++ev1; 1265 if (sb->disks[rdev->desc_nr].state & ( 1266 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) 1267 if (ev1 < mddev->events) 1268 return -EINVAL; 1269 } else if (mddev->bitmap) { 1270 /* if adding to array with a bitmap, then we can accept an 1271 * older device ... but not too old. 
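 * ("Too old" here means older than mddev->bitmap->events_cleared: the
 * write-intent bitmap no longer covers whatever such a device missed, so
 * the early return below leaves raid_disk at -1 and the device is treated
 * like a freshly added spare, whereas a device that is merely behind
 * mddev->events gets Bitmap_sync set and can be caught up from the bitmap.)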
1272 */ 1273 if (ev1 < mddev->bitmap->events_cleared) 1274 return 0; 1275 if (ev1 < mddev->events) 1276 set_bit(Bitmap_sync, &rdev->flags); 1277 } else { 1278 if (ev1 < mddev->events) 1279 /* just a hot-add of a new device, leave raid_disk at -1 */ 1280 return 0; 1281 } 1282 1283 if (mddev->level != LEVEL_MULTIPATH) { 1284 desc = sb->disks + rdev->desc_nr; 1285 1286 if (desc->state & (1<<MD_DISK_FAULTY)) 1287 set_bit(Faulty, &rdev->flags); 1288 else if (desc->state & (1<<MD_DISK_SYNC) /* && 1289 desc->raid_disk < mddev->raid_disks */) { 1290 set_bit(In_sync, &rdev->flags); 1291 rdev->raid_disk = desc->raid_disk; 1292 rdev->saved_raid_disk = desc->raid_disk; 1293 } else if (desc->state & (1<<MD_DISK_ACTIVE)) { 1294 /* active but not in sync implies recovery up to 1295 * reshape position. We don't know exactly where 1296 * that is, so set to zero for now */ 1297 if (mddev->minor_version >= 91) { 1298 rdev->recovery_offset = 0; 1299 rdev->raid_disk = desc->raid_disk; 1300 } 1301 } 1302 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 1303 set_bit(WriteMostly, &rdev->flags); 1304 if (desc->state & (1<<MD_DISK_FAILFAST)) 1305 set_bit(FailFast, &rdev->flags); 1306 } else /* MULTIPATH are always insync */ 1307 set_bit(In_sync, &rdev->flags); 1308 return 0; 1309 } 1310 1311 /* 1312 * sync_super for 0.90.0 1313 */ 1314 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) 1315 { 1316 mdp_super_t *sb; 1317 struct md_rdev *rdev2; 1318 int next_spare = mddev->raid_disks; 1319 1320 /* make rdev->sb match mddev data.. 1321 * 1322 * 1/ zero out disks 1323 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 1324 * 3/ any empty disks < next_spare become removed 1325 * 1326 * disks[0] gets initialised to REMOVED because 1327 * we cannot be sure from other fields if it has 1328 * been initialised or not. 
1329 */ 1330 int i; 1331 int active=0, working=0,failed=0,spare=0,nr_disks=0; 1332 1333 rdev->sb_size = MD_SB_BYTES; 1334 1335 sb = page_address(rdev->sb_page); 1336 1337 memset(sb, 0, sizeof(*sb)); 1338 1339 sb->md_magic = MD_SB_MAGIC; 1340 sb->major_version = mddev->major_version; 1341 sb->patch_version = mddev->patch_version; 1342 sb->gvalid_words = 0; /* ignored */ 1343 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 1344 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 1345 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 1346 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 1347 1348 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 1349 sb->level = mddev->level; 1350 sb->size = mddev->dev_sectors / 2; 1351 sb->raid_disks = mddev->raid_disks; 1352 sb->md_minor = mddev->md_minor; 1353 sb->not_persistent = 0; 1354 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 1355 sb->state = 0; 1356 sb->events_hi = (mddev->events>>32); 1357 sb->events_lo = (u32)mddev->events; 1358 1359 if (mddev->reshape_position == MaxSector) 1360 sb->minor_version = 90; 1361 else { 1362 sb->minor_version = 91; 1363 sb->reshape_position = mddev->reshape_position; 1364 sb->new_level = mddev->new_level; 1365 sb->delta_disks = mddev->delta_disks; 1366 sb->new_layout = mddev->new_layout; 1367 sb->new_chunk = mddev->new_chunk_sectors << 9; 1368 } 1369 mddev->minor_version = sb->minor_version; 1370 if (mddev->in_sync) 1371 { 1372 sb->recovery_cp = mddev->recovery_cp; 1373 sb->cp_events_hi = (mddev->events>>32); 1374 sb->cp_events_lo = (u32)mddev->events; 1375 if (mddev->recovery_cp == MaxSector) 1376 sb->state = (1<< MD_SB_CLEAN); 1377 } else 1378 sb->recovery_cp = 0; 1379 1380 sb->layout = mddev->layout; 1381 sb->chunk_size = mddev->chunk_sectors << 9; 1382 1383 if (mddev->bitmap && mddev->bitmap_info.file == NULL) 1384 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1385 1386 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1387 rdev_for_each(rdev2, mddev) { 1388 mdp_disk_t *d; 1389 int desc_nr; 1390 int is_active = test_bit(In_sync, &rdev2->flags); 1391 1392 if (rdev2->raid_disk >= 0 && 1393 sb->minor_version >= 91) 1394 /* we have nowhere to store the recovery_offset, 1395 * but if it is not below the reshape_position, 1396 * we can piggy-back on that. 
1397 */ 1398 is_active = 1; 1399 if (rdev2->raid_disk < 0 || 1400 test_bit(Faulty, &rdev2->flags)) 1401 is_active = 0; 1402 if (is_active) 1403 desc_nr = rdev2->raid_disk; 1404 else 1405 desc_nr = next_spare++; 1406 rdev2->desc_nr = desc_nr; 1407 d = &sb->disks[rdev2->desc_nr]; 1408 nr_disks++; 1409 d->number = rdev2->desc_nr; 1410 d->major = MAJOR(rdev2->bdev->bd_dev); 1411 d->minor = MINOR(rdev2->bdev->bd_dev); 1412 if (is_active) 1413 d->raid_disk = rdev2->raid_disk; 1414 else 1415 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1416 if (test_bit(Faulty, &rdev2->flags)) 1417 d->state = (1<<MD_DISK_FAULTY); 1418 else if (is_active) { 1419 d->state = (1<<MD_DISK_ACTIVE); 1420 if (test_bit(In_sync, &rdev2->flags)) 1421 d->state |= (1<<MD_DISK_SYNC); 1422 active++; 1423 working++; 1424 } else { 1425 d->state = 0; 1426 spare++; 1427 working++; 1428 } 1429 if (test_bit(WriteMostly, &rdev2->flags)) 1430 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1431 if (test_bit(FailFast, &rdev2->flags)) 1432 d->state |= (1<<MD_DISK_FAILFAST); 1433 } 1434 /* now set the "removed" and "faulty" bits on any missing devices */ 1435 for (i=0 ; i < mddev->raid_disks ; i++) { 1436 mdp_disk_t *d = &sb->disks[i]; 1437 if (d->state == 0 && d->number == 0) { 1438 d->number = i; 1439 d->raid_disk = i; 1440 d->state = (1<<MD_DISK_REMOVED); 1441 d->state |= (1<<MD_DISK_FAULTY); 1442 failed++; 1443 } 1444 } 1445 sb->nr_disks = nr_disks; 1446 sb->active_disks = active; 1447 sb->working_disks = working; 1448 sb->failed_disks = failed; 1449 sb->spare_disks = spare; 1450 1451 sb->this_disk = sb->disks[rdev->desc_nr]; 1452 sb->sb_csum = calc_sb_csum(sb); 1453 } 1454 1455 /* 1456 * rdev_size_change for 0.90.0 1457 */ 1458 static unsigned long long 1459 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1460 { 1461 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1462 return 0; /* component must fit device */ 1463 if (rdev->mddev->bitmap_info.offset) 1464 return 0; /* can't move bitmap */ 1465 rdev->sb_start = calc_dev_sboffset(rdev); 1466 if (!num_sectors || num_sectors > rdev->sb_start) 1467 num_sectors = rdev->sb_start; 1468 /* Limit to 4TB as metadata cannot record more than that. 1469 * 4TB == 2^32 KB, or 2*2^32 sectors. 
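 * (In detail: the 0.90 superblock records the per-device size in 1K units
 * in a 32 bit field, so the ceiling is 2^32 KB = 4TB, i.e. 2*2^32 sectors
 * of 512 bytes; the check below caps num_sectors just short of that.)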
1470 */ 1471 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1472 num_sectors = (sector_t)(2ULL << 32) - 2; 1473 do { 1474 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1475 rdev->sb_page); 1476 } while (md_super_wait(rdev->mddev) < 0); 1477 return num_sectors; 1478 } 1479 1480 static int 1481 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1482 { 1483 /* non-zero offset changes not possible with v0.90 */ 1484 return new_offset == 0; 1485 } 1486 1487 /* 1488 * version 1 superblock 1489 */ 1490 1491 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) 1492 { 1493 __le32 disk_csum; 1494 u32 csum; 1495 unsigned long long newcsum; 1496 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1497 __le32 *isuper = (__le32*)sb; 1498 1499 disk_csum = sb->sb_csum; 1500 sb->sb_csum = 0; 1501 newcsum = 0; 1502 for (; size >= 4; size -= 4) 1503 newcsum += le32_to_cpu(*isuper++); 1504 1505 if (size == 2) 1506 newcsum += le16_to_cpu(*(__le16*) isuper); 1507 1508 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1509 sb->sb_csum = disk_csum; 1510 return cpu_to_le32(csum); 1511 } 1512 1513 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1514 { 1515 struct mdp_superblock_1 *sb; 1516 int ret; 1517 sector_t sb_start; 1518 sector_t sectors; 1519 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1520 int bmask; 1521 1522 /* 1523 * Calculate the position of the superblock in 512byte sectors. 1524 * It is always aligned to a 4K boundary and 1525 * depeding on minor_version, it can be: 1526 * 0: At least 8K, but less than 12K, from end of device 1527 * 1: At start of device 1528 * 2: 4K from start of device. 1529 */ 1530 switch(minor_version) { 1531 case 0: 1532 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; 1533 sb_start -= 8*2; 1534 sb_start &= ~(sector_t)(4*2-1); 1535 break; 1536 case 1: 1537 sb_start = 0; 1538 break; 1539 case 2: 1540 sb_start = 8; 1541 break; 1542 default: 1543 return -EINVAL; 1544 } 1545 rdev->sb_start = sb_start; 1546 1547 /* superblock is rarely larger than 1K, but it can be larger, 1548 * and it is safe to read 4k, so we do that 1549 */ 1550 ret = read_disk_sb(rdev, 4096); 1551 if (ret) return ret; 1552 1553 sb = page_address(rdev->sb_page); 1554 1555 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1556 sb->major_version != cpu_to_le32(1) || 1557 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1558 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1559 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1560 return -EINVAL; 1561 1562 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1563 pr_warn("md: invalid superblock checksum on %s\n", 1564 bdevname(rdev->bdev,b)); 1565 return -EINVAL; 1566 } 1567 if (le64_to_cpu(sb->data_size) < 10) { 1568 pr_warn("md: data_size too small on %s\n", 1569 bdevname(rdev->bdev,b)); 1570 return -EINVAL; 1571 } 1572 if (sb->pad0 || 1573 sb->pad3[0] || 1574 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) 1575 /* Some padding is non-zero, might be a new feature */ 1576 return -EINVAL; 1577 1578 rdev->preferred_minor = 0xffff; 1579 rdev->data_offset = le64_to_cpu(sb->data_offset); 1580 rdev->new_data_offset = rdev->data_offset; 1581 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1582 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1583 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1584 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1585 1586 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 
256; 1587 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1588 if (rdev->sb_size & bmask) 1589 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1590 1591 if (minor_version 1592 && rdev->data_offset < sb_start + (rdev->sb_size/512)) 1593 return -EINVAL; 1594 if (minor_version 1595 && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) 1596 return -EINVAL; 1597 1598 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) 1599 rdev->desc_nr = -1; 1600 else 1601 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1602 1603 if (!rdev->bb_page) { 1604 rdev->bb_page = alloc_page(GFP_KERNEL); 1605 if (!rdev->bb_page) 1606 return -ENOMEM; 1607 } 1608 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) && 1609 rdev->badblocks.count == 0) { 1610 /* need to load the bad block list. 1611 * Currently we limit it to one page. 1612 */ 1613 s32 offset; 1614 sector_t bb_sector; 1615 __le64 *bbp; 1616 int i; 1617 int sectors = le16_to_cpu(sb->bblog_size); 1618 if (sectors > (PAGE_SIZE / 512)) 1619 return -EINVAL; 1620 offset = le32_to_cpu(sb->bblog_offset); 1621 if (offset == 0) 1622 return -EINVAL; 1623 bb_sector = (long long)offset; 1624 if (!sync_page_io(rdev, bb_sector, sectors << 9, 1625 rdev->bb_page, REQ_OP_READ, 0, true)) 1626 return -EIO; 1627 bbp = (__le64 *)page_address(rdev->bb_page); 1628 rdev->badblocks.shift = sb->bblog_shift; 1629 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { 1630 u64 bb = le64_to_cpu(*bbp); 1631 int count = bb & (0x3ff); 1632 u64 sector = bb >> 10; 1633 sector <<= sb->bblog_shift; 1634 count <<= sb->bblog_shift; 1635 if (bb + 1 == 0) 1636 break; 1637 if (badblocks_set(&rdev->badblocks, sector, count, 1)) 1638 return -EINVAL; 1639 } 1640 } else if (sb->bblog_offset != 0) 1641 rdev->badblocks.shift = 0; 1642 1643 if ((le32_to_cpu(sb->feature_map) & 1644 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) { 1645 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset); 1646 rdev->ppl.size = le16_to_cpu(sb->ppl.size); 1647 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; 1648 } 1649 1650 if (!refdev) { 1651 ret = 1; 1652 } else { 1653 __u64 ev1, ev2; 1654 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); 1655 1656 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1657 sb->level != refsb->level || 1658 sb->layout != refsb->layout || 1659 sb->chunksize != refsb->chunksize) { 1660 pr_warn("md: %s has strangely different superblock to %s\n", 1661 bdevname(rdev->bdev,b), 1662 bdevname(refdev->bdev,b2)); 1663 return -EINVAL; 1664 } 1665 ev1 = le64_to_cpu(sb->events); 1666 ev2 = le64_to_cpu(refsb->events); 1667 1668 if (ev1 > ev2) 1669 ret = 1; 1670 else 1671 ret = 0; 1672 } 1673 if (minor_version) { 1674 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); 1675 sectors -= rdev->data_offset; 1676 } else 1677 sectors = rdev->sb_start; 1678 if (sectors < le64_to_cpu(sb->data_size)) 1679 return -EINVAL; 1680 rdev->sectors = le64_to_cpu(sb->data_size); 1681 return ret; 1682 } 1683 1684 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) 1685 { 1686 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 1687 __u64 ev1 = le64_to_cpu(sb->events); 1688 1689 rdev->raid_disk = -1; 1690 clear_bit(Faulty, &rdev->flags); 1691 clear_bit(In_sync, &rdev->flags); 1692 clear_bit(Bitmap_sync, &rdev->flags); 1693 clear_bit(WriteMostly, &rdev->flags); 1694 1695 if (mddev->raid_disks == 0) { 1696 mddev->major_version = 1; 1697 mddev->patch_version = 0; 1698 mddev->external = 0; 1699 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); 1700 mddev->ctime = 
le64_to_cpu(sb->ctime); 1701 mddev->utime = le64_to_cpu(sb->utime); 1702 mddev->level = le32_to_cpu(sb->level); 1703 mddev->clevel[0] = 0; 1704 mddev->layout = le32_to_cpu(sb->layout); 1705 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1706 mddev->dev_sectors = le64_to_cpu(sb->size); 1707 mddev->events = ev1; 1708 mddev->bitmap_info.offset = 0; 1709 mddev->bitmap_info.space = 0; 1710 /* Default location for bitmap is 1K after superblock 1711 * using 3K - total of 4K 1712 */ 1713 mddev->bitmap_info.default_offset = 1024 >> 9; 1714 mddev->bitmap_info.default_space = (4096-1024) >> 9; 1715 mddev->reshape_backwards = 0; 1716 1717 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1718 memcpy(mddev->uuid, sb->set_uuid, 16); 1719 1720 mddev->max_disks = (4096-256)/2; 1721 1722 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1723 mddev->bitmap_info.file == NULL) { 1724 mddev->bitmap_info.offset = 1725 (__s32)le32_to_cpu(sb->bitmap_offset); 1726 /* Metadata doesn't record how much space is available. 1727 * For 1.0, we assume we can use up to the superblock 1728 * if before, else to 4K beyond superblock. 1729 * For others, assume no change is possible. 1730 */ 1731 if (mddev->minor_version > 0) 1732 mddev->bitmap_info.space = 0; 1733 else if (mddev->bitmap_info.offset > 0) 1734 mddev->bitmap_info.space = 1735 8 - mddev->bitmap_info.offset; 1736 else 1737 mddev->bitmap_info.space = 1738 -mddev->bitmap_info.offset; 1739 } 1740 1741 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1742 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1743 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1744 mddev->new_level = le32_to_cpu(sb->new_level); 1745 mddev->new_layout = le32_to_cpu(sb->new_layout); 1746 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); 1747 if (mddev->delta_disks < 0 || 1748 (mddev->delta_disks == 0 && 1749 (le32_to_cpu(sb->feature_map) 1750 & MD_FEATURE_RESHAPE_BACKWARDS))) 1751 mddev->reshape_backwards = 1; 1752 } else { 1753 mddev->reshape_position = MaxSector; 1754 mddev->delta_disks = 0; 1755 mddev->new_level = mddev->level; 1756 mddev->new_layout = mddev->layout; 1757 mddev->new_chunk_sectors = mddev->chunk_sectors; 1758 } 1759 1760 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) 1761 set_bit(MD_HAS_JOURNAL, &mddev->flags); 1762 1763 if (le32_to_cpu(sb->feature_map) & 1764 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) { 1765 if (le32_to_cpu(sb->feature_map) & 1766 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL)) 1767 return -EINVAL; 1768 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) && 1769 (le32_to_cpu(sb->feature_map) & 1770 MD_FEATURE_MULTIPLE_PPLS)) 1771 return -EINVAL; 1772 set_bit(MD_HAS_PPL, &mddev->flags); 1773 } 1774 } else if (mddev->pers == NULL) { 1775 /* Insist of good event counter while assembling, except for 1776 * spares (which don't need an event count) */ 1777 ++ev1; 1778 if (rdev->desc_nr >= 0 && 1779 rdev->desc_nr < le32_to_cpu(sb->max_dev) && 1780 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1781 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) 1782 if (ev1 < mddev->events) 1783 return -EINVAL; 1784 } else if (mddev->bitmap) { 1785 /* If adding to array with a bitmap, then we can accept an 1786 * older device, but not too old. 
1787 */ 1788 if (ev1 < mddev->bitmap->events_cleared) 1789 return 0; 1790 if (ev1 < mddev->events) 1791 set_bit(Bitmap_sync, &rdev->flags); 1792 } else { 1793 if (ev1 < mddev->events) 1794 /* just a hot-add of a new device, leave raid_disk at -1 */ 1795 return 0; 1796 } 1797 if (mddev->level != LEVEL_MULTIPATH) { 1798 int role; 1799 if (rdev->desc_nr < 0 || 1800 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { 1801 role = MD_DISK_ROLE_SPARE; 1802 rdev->desc_nr = -1; 1803 } else 1804 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1805 switch(role) { 1806 case MD_DISK_ROLE_SPARE: /* spare */ 1807 break; 1808 case MD_DISK_ROLE_FAULTY: /* faulty */ 1809 set_bit(Faulty, &rdev->flags); 1810 break; 1811 case MD_DISK_ROLE_JOURNAL: /* journal device */ 1812 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { 1813 /* journal device without journal feature */ 1814 pr_warn("md: journal device provided without journal feature, ignoring the device\n"); 1815 return -EINVAL; 1816 } 1817 set_bit(Journal, &rdev->flags); 1818 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1819 rdev->raid_disk = 0; 1820 break; 1821 default: 1822 rdev->saved_raid_disk = role; 1823 if ((le32_to_cpu(sb->feature_map) & 1824 MD_FEATURE_RECOVERY_OFFSET)) { 1825 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 1826 if (!(le32_to_cpu(sb->feature_map) & 1827 MD_FEATURE_RECOVERY_BITMAP)) 1828 rdev->saved_raid_disk = -1; 1829 } else { 1830 /* 1831 * If the array is FROZEN, then the device can't 1832 * be in_sync with rest of array. 1833 */ 1834 if (!test_bit(MD_RECOVERY_FROZEN, 1835 &mddev->recovery)) 1836 set_bit(In_sync, &rdev->flags); 1837 } 1838 rdev->raid_disk = role; 1839 break; 1840 } 1841 if (sb->devflags & WriteMostly1) 1842 set_bit(WriteMostly, &rdev->flags); 1843 if (sb->devflags & FailFast1) 1844 set_bit(FailFast, &rdev->flags); 1845 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) 1846 set_bit(Replacement, &rdev->flags); 1847 } else /* MULTIPATH are always insync */ 1848 set_bit(In_sync, &rdev->flags); 1849 1850 return 0; 1851 } 1852 1853 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 1854 { 1855 struct mdp_superblock_1 *sb; 1856 struct md_rdev *rdev2; 1857 int max_dev, i; 1858 /* make rdev->sb match mddev and rdev data. 
*/ 1859 1860 sb = page_address(rdev->sb_page); 1861 1862 sb->feature_map = 0; 1863 sb->pad0 = 0; 1864 sb->recovery_offset = cpu_to_le64(0); 1865 memset(sb->pad3, 0, sizeof(sb->pad3)); 1866 1867 sb->utime = cpu_to_le64((__u64)mddev->utime); 1868 sb->events = cpu_to_le64(mddev->events); 1869 if (mddev->in_sync) 1870 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1871 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 1872 sb->resync_offset = cpu_to_le64(MaxSector); 1873 else 1874 sb->resync_offset = cpu_to_le64(0); 1875 1876 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1877 1878 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1879 sb->size = cpu_to_le64(mddev->dev_sectors); 1880 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1881 sb->level = cpu_to_le32(mddev->level); 1882 sb->layout = cpu_to_le32(mddev->layout); 1883 if (test_bit(FailFast, &rdev->flags)) 1884 sb->devflags |= FailFast1; 1885 else 1886 sb->devflags &= ~FailFast1; 1887 1888 if (test_bit(WriteMostly, &rdev->flags)) 1889 sb->devflags |= WriteMostly1; 1890 else 1891 sb->devflags &= ~WriteMostly1; 1892 sb->data_offset = cpu_to_le64(rdev->data_offset); 1893 sb->data_size = cpu_to_le64(rdev->sectors); 1894 1895 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1896 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1897 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1898 } 1899 1900 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && 1901 !test_bit(In_sync, &rdev->flags)) { 1902 sb->feature_map |= 1903 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1904 sb->recovery_offset = 1905 cpu_to_le64(rdev->recovery_offset); 1906 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 1907 sb->feature_map |= 1908 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 1909 } 1910 /* Note: recovery_offset and journal_tail share space */ 1911 if (test_bit(Journal, &rdev->flags)) 1912 sb->journal_tail = cpu_to_le64(rdev->journal_tail); 1913 if (test_bit(Replacement, &rdev->flags)) 1914 sb->feature_map |= 1915 cpu_to_le32(MD_FEATURE_REPLACEMENT); 1916 1917 if (mddev->reshape_position != MaxSector) { 1918 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1919 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1920 sb->new_layout = cpu_to_le32(mddev->new_layout); 1921 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1922 sb->new_level = cpu_to_le32(mddev->new_level); 1923 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 1924 if (mddev->delta_disks == 0 && 1925 mddev->reshape_backwards) 1926 sb->feature_map 1927 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 1928 if (rdev->new_data_offset != rdev->data_offset) { 1929 sb->feature_map 1930 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 1931 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 1932 - rdev->data_offset)); 1933 } 1934 } 1935 1936 if (mddev_is_clustered(mddev)) 1937 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); 1938 1939 if (rdev->badblocks.count == 0) 1940 /* Nothing to do for bad blocks*/ ; 1941 else if (sb->bblog_offset == 0) 1942 /* Cannot record bad blocks on this device */ 1943 md_error(mddev, rdev); 1944 else { 1945 struct badblocks *bb = &rdev->badblocks; 1946 __le64 *bbp = (__le64 *)page_address(rdev->bb_page); 1947 u64 *p = bb->page; 1948 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 1949 if (bb->changed) { 1950 unsigned seq; 1951 1952 retry: 1953 seq = read_seqbegin(&bb->lock); 1954 1955 memset(bbp, 0xff, PAGE_SIZE); 1956 1957 for (i = 0 ; i < bb->count ; i++) { 1958 u64 
internal_bb = p[i]; 1959 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 1960 | BB_LEN(internal_bb)); 1961 bbp[i] = cpu_to_le64(store_bb); 1962 } 1963 bb->changed = 0; 1964 if (read_seqretry(&bb->lock, seq)) 1965 goto retry; 1966 1967 bb->sector = (rdev->sb_start + 1968 (int)le32_to_cpu(sb->bblog_offset)); 1969 bb->size = le16_to_cpu(sb->bblog_size); 1970 } 1971 } 1972 1973 max_dev = 0; 1974 rdev_for_each(rdev2, mddev) 1975 if (rdev2->desc_nr+1 > max_dev) 1976 max_dev = rdev2->desc_nr+1; 1977 1978 if (max_dev > le32_to_cpu(sb->max_dev)) { 1979 int bmask; 1980 sb->max_dev = cpu_to_le32(max_dev); 1981 rdev->sb_size = max_dev * 2 + 256; 1982 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1983 if (rdev->sb_size & bmask) 1984 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1985 } else 1986 max_dev = le32_to_cpu(sb->max_dev); 1987 1988 for (i=0; i<max_dev;i++) 1989 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 1990 1991 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 1992 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 1993 1994 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 1995 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 1996 sb->feature_map |= 1997 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 1998 else 1999 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 2000 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 2001 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 2002 } 2003 2004 rdev_for_each(rdev2, mddev) { 2005 i = rdev2->desc_nr; 2006 if (test_bit(Faulty, &rdev2->flags)) 2007 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 2008 else if (test_bit(In_sync, &rdev2->flags)) 2009 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2010 else if (test_bit(Journal, &rdev2->flags)) 2011 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 2012 else if (rdev2->raid_disk >= 0) 2013 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2014 else 2015 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2016 } 2017 2018 sb->sb_csum = calc_sb_1_csum(sb); 2019 } 2020 2021 static unsigned long long 2022 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 2023 { 2024 struct mdp_superblock_1 *sb; 2025 sector_t max_sectors; 2026 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 2027 return 0; /* component must fit device */ 2028 if (rdev->data_offset != rdev->new_data_offset) 2029 return 0; /* too confusing */ 2030 if (rdev->sb_start < rdev->data_offset) { 2031 /* minor versions 1 and 2; superblock before data */ 2032 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; 2033 max_sectors -= rdev->data_offset; 2034 if (!num_sectors || num_sectors > max_sectors) 2035 num_sectors = max_sectors; 2036 } else if (rdev->mddev->bitmap_info.offset) { 2037 /* minor version 0 with bitmap we can't move */ 2038 return 0; 2039 } else { 2040 /* minor version 0; superblock after data */ 2041 sector_t sb_start; 2042 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; 2043 sb_start &= ~(sector_t)(4*2 - 1); 2044 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 2045 if (!num_sectors || num_sectors > max_sectors) 2046 num_sectors = max_sectors; 2047 rdev->sb_start = sb_start; 2048 } 2049 sb = page_address(rdev->sb_page); 2050 sb->data_size = cpu_to_le64(num_sectors); 2051 sb->super_offset = cpu_to_le64(rdev->sb_start); 2052 sb->sb_csum = calc_sb_1_csum(sb); 2053 do { 2054 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 2055 rdev->sb_page); 2056 } while (md_super_wait(rdev->mddev) < 0); 2057 return num_sectors; 2058 2059 } 2060 2061 static int 2062 
super_1_allow_new_offset(struct md_rdev *rdev, 2063 unsigned long long new_offset) 2064 { 2065 /* All necessary checks on new >= old have been done */ 2066 struct bitmap *bitmap; 2067 if (new_offset >= rdev->data_offset) 2068 return 1; 2069 2070 /* with 1.0 metadata, there is no metadata to tread on 2071 * so we can always move back */ 2072 if (rdev->mddev->minor_version == 0) 2073 return 1; 2074 2075 /* otherwise we must be sure not to step on 2076 * any metadata, so stay: 2077 * 36K beyond start of superblock 2078 * beyond end of badblocks 2079 * beyond write-intent bitmap 2080 */ 2081 if (rdev->sb_start + (32+4)*2 > new_offset) 2082 return 0; 2083 bitmap = rdev->mddev->bitmap; 2084 if (bitmap && !rdev->mddev->bitmap_info.file && 2085 rdev->sb_start + rdev->mddev->bitmap_info.offset + 2086 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) 2087 return 0; 2088 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2089 return 0; 2090 2091 return 1; 2092 } 2093 2094 static struct super_type super_types[] = { 2095 [0] = { 2096 .name = "0.90.0", 2097 .owner = THIS_MODULE, 2098 .load_super = super_90_load, 2099 .validate_super = super_90_validate, 2100 .sync_super = super_90_sync, 2101 .rdev_size_change = super_90_rdev_size_change, 2102 .allow_new_offset = super_90_allow_new_offset, 2103 }, 2104 [1] = { 2105 .name = "md-1", 2106 .owner = THIS_MODULE, 2107 .load_super = super_1_load, 2108 .validate_super = super_1_validate, 2109 .sync_super = super_1_sync, 2110 .rdev_size_change = super_1_rdev_size_change, 2111 .allow_new_offset = super_1_allow_new_offset, 2112 }, 2113 }; 2114 2115 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2116 { 2117 if (mddev->sync_super) { 2118 mddev->sync_super(mddev, rdev); 2119 return; 2120 } 2121 2122 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2123 2124 super_types[mddev->major_version].sync_super(mddev, rdev); 2125 } 2126 2127 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2128 { 2129 struct md_rdev *rdev, *rdev2; 2130 2131 rcu_read_lock(); 2132 rdev_for_each_rcu(rdev, mddev1) { 2133 if (test_bit(Faulty, &rdev->flags) || 2134 test_bit(Journal, &rdev->flags) || 2135 rdev->raid_disk == -1) 2136 continue; 2137 rdev_for_each_rcu(rdev2, mddev2) { 2138 if (test_bit(Faulty, &rdev2->flags) || 2139 test_bit(Journal, &rdev2->flags) || 2140 rdev2->raid_disk == -1) 2141 continue; 2142 if (rdev->bdev->bd_contains == 2143 rdev2->bdev->bd_contains) { 2144 rcu_read_unlock(); 2145 return 1; 2146 } 2147 } 2148 } 2149 rcu_read_unlock(); 2150 return 0; 2151 } 2152 2153 static LIST_HEAD(pending_raid_disks); 2154 2155 /* 2156 * Try to register data integrity profile for an mddev 2157 * 2158 * This is called when an array is started and after a disk has been kicked 2159 * from the array. It only succeeds if all working and active component devices 2160 * are integrity capable with matching profiles. 
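 *
 * A rough sketch of the intended call pattern (illustrative only; the
 * demo_* names are invented, not taken from this file):
 *
 *	static int demo_run(struct mddev *mddev)
 *	{
 *		int err = demo_setup_conf(mddev);
 *
 *		if (err)
 *			return err;
 *		return md_integrity_register(mddev);
 *	}
 *
 * i.e. a personality registers the common profile once its component
 * devices are known; a profile mismatch among members fails with -EINVAL.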
2161 */ 2162 int md_integrity_register(struct mddev *mddev) 2163 { 2164 struct md_rdev *rdev, *reference = NULL; 2165 2166 if (list_empty(&mddev->disks)) 2167 return 0; /* nothing to do */ 2168 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 2169 return 0; /* shouldn't register, or already is */ 2170 rdev_for_each(rdev, mddev) { 2171 /* skip spares and non-functional disks */ 2172 if (test_bit(Faulty, &rdev->flags)) 2173 continue; 2174 if (rdev->raid_disk < 0) 2175 continue; 2176 if (!reference) { 2177 /* Use the first rdev as the reference */ 2178 reference = rdev; 2179 continue; 2180 } 2181 /* does this rdev's profile match the reference profile? */ 2182 if (blk_integrity_compare(reference->bdev->bd_disk, 2183 rdev->bdev->bd_disk) < 0) 2184 return -EINVAL; 2185 } 2186 if (!reference || !bdev_get_integrity(reference->bdev)) 2187 return 0; 2188 /* 2189 * All component devices are integrity capable and have matching 2190 * profiles, register the common profile for the md device. 2191 */ 2192 blk_integrity_register(mddev->gendisk, 2193 bdev_get_integrity(reference->bdev)); 2194 2195 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2196 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { 2197 pr_err("md: failed to create integrity pool for %s\n", 2198 mdname(mddev)); 2199 return -EINVAL; 2200 } 2201 return 0; 2202 } 2203 EXPORT_SYMBOL(md_integrity_register); 2204 2205 /* 2206 * Attempt to add an rdev, but only if it is consistent with the current 2207 * integrity profile 2208 */ 2209 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2210 { 2211 struct blk_integrity *bi_mddev; 2212 char name[BDEVNAME_SIZE]; 2213 2214 if (!mddev->gendisk) 2215 return 0; 2216 2217 bi_mddev = blk_get_integrity(mddev->gendisk); 2218 2219 if (!bi_mddev) /* nothing to do */ 2220 return 0; 2221 2222 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2223 pr_err("%s: incompatible integrity profile for %s\n", 2224 mdname(mddev), bdevname(rdev->bdev, name)); 2225 return -ENXIO; 2226 } 2227 2228 return 0; 2229 } 2230 EXPORT_SYMBOL(md_integrity_add_rdev); 2231 2232 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2233 { 2234 char b[BDEVNAME_SIZE]; 2235 struct kobject *ko; 2236 int err; 2237 2238 /* prevent duplicates */ 2239 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2240 return -EEXIST; 2241 2242 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) && 2243 mddev->pers) 2244 return -EROFS; 2245 2246 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2247 if (!test_bit(Journal, &rdev->flags) && 2248 rdev->sectors && 2249 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2250 if (mddev->pers) { 2251 /* Cannot change size, so fail 2252 * If mddev->level <= 0, then we don't care 2253 * about aligning sizes (e.g. linear) 2254 */ 2255 if (mddev->level > 0) 2256 return -ENOSPC; 2257 } else 2258 mddev->dev_sectors = rdev->sectors; 2259 } 2260 2261 /* Verify rdev->desc_nr is unique. 
2262 * If it is -1, assign a free number, else 2263 * check number is not in use 2264 */ 2265 rcu_read_lock(); 2266 if (rdev->desc_nr < 0) { 2267 int choice = 0; 2268 if (mddev->pers) 2269 choice = mddev->raid_disks; 2270 while (md_find_rdev_nr_rcu(mddev, choice)) 2271 choice++; 2272 rdev->desc_nr = choice; 2273 } else { 2274 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2275 rcu_read_unlock(); 2276 return -EBUSY; 2277 } 2278 } 2279 rcu_read_unlock(); 2280 if (!test_bit(Journal, &rdev->flags) && 2281 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2282 pr_warn("md: %s: array is limited to %d devices\n", 2283 mdname(mddev), mddev->max_disks); 2284 return -EBUSY; 2285 } 2286 bdevname(rdev->bdev,b); 2287 strreplace(b, '/', '!'); 2288 2289 rdev->mddev = mddev; 2290 pr_debug("md: bind<%s>\n", b); 2291 2292 if (mddev->raid_disks) 2293 mddev_create_wb_pool(mddev, rdev, false); 2294 2295 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2296 goto fail; 2297 2298 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2299 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2300 /* failure here is OK */; 2301 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2302 2303 list_add_rcu(&rdev->same_set, &mddev->disks); 2304 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2305 2306 /* May as well allow recovery to be retried once */ 2307 mddev->recovery_disabled++; 2308 2309 return 0; 2310 2311 fail: 2312 pr_warn("md: failed to register dev-%s for %s\n", 2313 b, mdname(mddev)); 2314 return err; 2315 } 2316 2317 static void md_delayed_delete(struct work_struct *ws) 2318 { 2319 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2320 kobject_del(&rdev->kobj); 2321 kobject_put(&rdev->kobj); 2322 } 2323 2324 static void unbind_rdev_from_array(struct md_rdev *rdev) 2325 { 2326 char b[BDEVNAME_SIZE]; 2327 2328 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2329 list_del_rcu(&rdev->same_set); 2330 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2331 mddev_destroy_wb_pool(rdev->mddev, rdev); 2332 rdev->mddev = NULL; 2333 sysfs_remove_link(&rdev->kobj, "block"); 2334 sysfs_put(rdev->sysfs_state); 2335 rdev->sysfs_state = NULL; 2336 rdev->badblocks.count = 0; 2337 /* We need to delay this, otherwise we can deadlock when 2338 * writing to 'remove' to "dev/state". We also need 2339 * to delay it due to rcu usage. 2340 */ 2341 synchronize_rcu(); 2342 INIT_WORK(&rdev->del_work, md_delayed_delete); 2343 kobject_get(&rdev->kobj); 2344 queue_work(md_misc_wq, &rdev->del_work); 2345 } 2346 2347 /* 2348 * prevent the device from being mounted, repartitioned or 2349 * otherwise reused by a RAID array (or any other kernel 2350 * subsystem), by bd_claiming the device. 2351 */ 2352 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2353 { 2354 int err = 0; 2355 struct block_device *bdev; 2356 char b[BDEVNAME_SIZE]; 2357 2358 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2359 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2360 if (IS_ERR(bdev)) { 2361 pr_warn("md: could not open %s.\n", __bdevname(dev, b)); 2362 return PTR_ERR(bdev); 2363 } 2364 rdev->bdev = bdev; 2365 return err; 2366 } 2367 2368 static void unlock_rdev(struct md_rdev *rdev) 2369 { 2370 struct block_device *bdev = rdev->bdev; 2371 rdev->bdev = NULL; 2372 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2373 } 2374 2375 void md_autodetect_dev(dev_t dev); 2376 2377 static void export_rdev(struct md_rdev *rdev) 2378 { 2379 char b[BDEVNAME_SIZE]; 2380 2381 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b)); 2382 md_rdev_clear(rdev); 2383 #ifndef MODULE 2384 if (test_bit(AutoDetected, &rdev->flags)) 2385 md_autodetect_dev(rdev->bdev->bd_dev); 2386 #endif 2387 unlock_rdev(rdev); 2388 kobject_put(&rdev->kobj); 2389 } 2390 2391 void md_kick_rdev_from_array(struct md_rdev *rdev) 2392 { 2393 unbind_rdev_from_array(rdev); 2394 export_rdev(rdev); 2395 } 2396 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2397 2398 static void export_array(struct mddev *mddev) 2399 { 2400 struct md_rdev *rdev; 2401 2402 while (!list_empty(&mddev->disks)) { 2403 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2404 same_set); 2405 md_kick_rdev_from_array(rdev); 2406 } 2407 mddev->raid_disks = 0; 2408 mddev->major_version = 0; 2409 } 2410 2411 static bool set_in_sync(struct mddev *mddev) 2412 { 2413 lockdep_assert_held(&mddev->lock); 2414 if (!mddev->in_sync) { 2415 mddev->sync_checkers++; 2416 spin_unlock(&mddev->lock); 2417 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2418 spin_lock(&mddev->lock); 2419 if (!mddev->in_sync && 2420 percpu_ref_is_zero(&mddev->writes_pending)) { 2421 mddev->in_sync = 1; 2422 /* 2423 * Ensure ->in_sync is visible before we clear 2424 * ->sync_checkers. 2425 */ 2426 smp_mb(); 2427 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2428 sysfs_notify_dirent_safe(mddev->sysfs_state); 2429 } 2430 if (--mddev->sync_checkers == 0) 2431 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2432 } 2433 if (mddev->safemode == 1) 2434 mddev->safemode = 0; 2435 return mddev->in_sync; 2436 } 2437 2438 static void sync_sbs(struct mddev *mddev, int nospares) 2439 { 2440 /* Update each superblock (in-memory image), but 2441 * if we are allowed to, skip spares which already 2442 * have the right event counter, or have one earlier 2443 * (which would mean they aren't being marked as dirty 2444 * with the rest of the array) 2445 */ 2446 struct md_rdev *rdev; 2447 rdev_for_each(rdev, mddev) { 2448 if (rdev->sb_events == mddev->events || 2449 (nospares && 2450 rdev->raid_disk < 0 && 2451 rdev->sb_events+1 == mddev->events)) { 2452 /* Don't update this superblock */ 2453 rdev->sb_loaded = 2; 2454 } else { 2455 sync_super(mddev, rdev); 2456 rdev->sb_loaded = 1; 2457 } 2458 } 2459 } 2460 2461 static bool does_sb_need_changing(struct mddev *mddev) 2462 { 2463 struct md_rdev *rdev; 2464 struct mdp_superblock_1 *sb; 2465 int role; 2466 2467 /* Find a good rdev */ 2468 rdev_for_each(rdev, mddev) 2469 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) 2470 break; 2471 2472 /* No good device found. */ 2473 if (!rdev) 2474 return false; 2475 2476 sb = page_address(rdev->sb_page); 2477 /* Check if a device has become faulty or a spare become active */ 2478 rdev_for_each(rdev, mddev) { 2479 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2480 /* Device activated? */ 2481 if (role == 0xffff && rdev->raid_disk >=0 && 2482 !test_bit(Faulty, &rdev->flags)) 2483 return true; 2484 /* Device turned faulty? 
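		 * (For reference: in the v1.x role table 0xffff is
		 * MD_DISK_ROLE_SPARE, 0xfffe is MD_DISK_ROLE_FAULTY and
		 * 0xfffd is MD_DISK_ROLE_JOURNAL, so a role below 0xfffd
		 * means the superblock still records an active data slot
		 * for this device.)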
*/ 2485 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd)) 2486 return true; 2487 } 2488 2489 /* Check if any mddev parameters have changed */ 2490 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2491 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2492 (mddev->layout != le32_to_cpu(sb->layout)) || 2493 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2494 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2495 return true; 2496 2497 return false; 2498 } 2499 2500 void md_update_sb(struct mddev *mddev, int force_change) 2501 { 2502 struct md_rdev *rdev; 2503 int sync_req; 2504 int nospares = 0; 2505 int any_badblocks_changed = 0; 2506 int ret = -1; 2507 2508 if (mddev->ro) { 2509 if (force_change) 2510 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2511 return; 2512 } 2513 2514 repeat: 2515 if (mddev_is_clustered(mddev)) { 2516 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2517 force_change = 1; 2518 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2519 nospares = 1; 2520 ret = md_cluster_ops->metadata_update_start(mddev); 2521 /* Has someone else has updated the sb */ 2522 if (!does_sb_need_changing(mddev)) { 2523 if (ret == 0) 2524 md_cluster_ops->metadata_update_cancel(mddev); 2525 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2526 BIT(MD_SB_CHANGE_DEVS) | 2527 BIT(MD_SB_CHANGE_CLEAN)); 2528 return; 2529 } 2530 } 2531 2532 /* 2533 * First make sure individual recovery_offsets are correct 2534 * curr_resync_completed can only be used during recovery. 2535 * During reshape/resync it might use array-addresses rather 2536 * that device addresses. 2537 */ 2538 rdev_for_each(rdev, mddev) { 2539 if (rdev->raid_disk >= 0 && 2540 mddev->delta_disks >= 0 && 2541 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 2542 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && 2543 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2544 !test_bit(Journal, &rdev->flags) && 2545 !test_bit(In_sync, &rdev->flags) && 2546 mddev->curr_resync_completed > rdev->recovery_offset) 2547 rdev->recovery_offset = mddev->curr_resync_completed; 2548 2549 } 2550 if (!mddev->persistent) { 2551 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2552 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2553 if (!mddev->external) { 2554 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 2555 rdev_for_each(rdev, mddev) { 2556 if (rdev->badblocks.changed) { 2557 rdev->badblocks.changed = 0; 2558 ack_all_badblocks(&rdev->badblocks); 2559 md_error(mddev, rdev); 2560 } 2561 clear_bit(Blocked, &rdev->flags); 2562 clear_bit(BlockedBadBlocks, &rdev->flags); 2563 wake_up(&rdev->blocked_wait); 2564 } 2565 } 2566 wake_up(&mddev->sb_wait); 2567 return; 2568 } 2569 2570 spin_lock(&mddev->lock); 2571 2572 mddev->utime = ktime_get_real_seconds(); 2573 2574 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2575 force_change = 1; 2576 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2577 /* just a clean<-> dirty transition, possibly leave spares alone, 2578 * though if events isn't the right even/odd, we will have to do 2579 * spares after all 2580 */ 2581 nospares = 1; 2582 if (force_change) 2583 nospares = 0; 2584 if (mddev->degraded) 2585 /* If the array is degraded, then skipping spares is both 2586 * dangerous and fairly pointless. 2587 * Dangerous because a device that was removed from the array 2588 * might have a event_count that still looks up-to-date, 2589 * so it can be re-added without a resync. 

2590 * Pointless because if there are any spares to skip, 2591 * then a recovery will happen and soon that array won't 2592 * be degraded any more and the spare can go back to sleep then. 2593 */ 2594 nospares = 0; 2595 2596 sync_req = mddev->in_sync; 2597 2598 /* If this is just a dirty<->clean transition, and the array is clean 2599 * and 'events' is odd, we can roll back to the previous clean state */ 2600 if (nospares 2601 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2602 && mddev->can_decrease_events 2603 && mddev->events != 1) { 2604 mddev->events--; 2605 mddev->can_decrease_events = 0; 2606 } else { 2607 /* otherwise we have to go forward and ... */ 2608 mddev->events ++; 2609 mddev->can_decrease_events = nospares; 2610 } 2611 2612 /* 2613 * This 64-bit counter should never wrap. 2614 * Either we are in around ~1 trillion A.C., assuming 2615 * 1 reboot per second, or we have a bug... 2616 */ 2617 WARN_ON(mddev->events == 0); 2618 2619 rdev_for_each(rdev, mddev) { 2620 if (rdev->badblocks.changed) 2621 any_badblocks_changed++; 2622 if (test_bit(Faulty, &rdev->flags)) 2623 set_bit(FaultRecorded, &rdev->flags); 2624 } 2625 2626 sync_sbs(mddev, nospares); 2627 spin_unlock(&mddev->lock); 2628 2629 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2630 mdname(mddev), mddev->in_sync); 2631 2632 if (mddev->queue) 2633 blk_add_trace_msg(mddev->queue, "md md_update_sb"); 2634 rewrite: 2635 md_bitmap_update_sb(mddev->bitmap); 2636 rdev_for_each(rdev, mddev) { 2637 char b[BDEVNAME_SIZE]; 2638 2639 if (rdev->sb_loaded != 1) 2640 continue; /* no noise on spare devices */ 2641 2642 if (!test_bit(Faulty, &rdev->flags)) { 2643 md_super_write(mddev,rdev, 2644 rdev->sb_start, rdev->sb_size, 2645 rdev->sb_page); 2646 pr_debug("md: (write) %s's sb offset: %llu\n", 2647 bdevname(rdev->bdev, b), 2648 (unsigned long long)rdev->sb_start); 2649 rdev->sb_events = mddev->events; 2650 if (rdev->badblocks.size) { 2651 md_super_write(mddev, rdev, 2652 rdev->badblocks.sector, 2653 rdev->badblocks.size << 9, 2654 rdev->bb_page); 2655 rdev->badblocks.size = 0; 2656 } 2657 2658 } else 2659 pr_debug("md: %s (skipping faulty)\n", 2660 bdevname(rdev->bdev, b)); 2661 2662 if (mddev->level == LEVEL_MULTIPATH) 2663 /* only need to write one superblock... 
*/ 2664 break; 2665 } 2666 if (md_super_wait(mddev) < 0) 2667 goto rewrite; 2668 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ 2669 2670 if (mddev_is_clustered(mddev) && ret == 0) 2671 md_cluster_ops->metadata_update_finish(mddev); 2672 2673 if (mddev->in_sync != sync_req || 2674 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2675 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) 2676 /* have to write it out again */ 2677 goto repeat; 2678 wake_up(&mddev->sb_wait); 2679 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2680 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2681 2682 rdev_for_each(rdev, mddev) { 2683 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2684 clear_bit(Blocked, &rdev->flags); 2685 2686 if (any_badblocks_changed) 2687 ack_all_badblocks(&rdev->badblocks); 2688 clear_bit(BlockedBadBlocks, &rdev->flags); 2689 wake_up(&rdev->blocked_wait); 2690 } 2691 } 2692 EXPORT_SYMBOL(md_update_sb); 2693 2694 static int add_bound_rdev(struct md_rdev *rdev) 2695 { 2696 struct mddev *mddev = rdev->mddev; 2697 int err = 0; 2698 bool add_journal = test_bit(Journal, &rdev->flags); 2699 2700 if (!mddev->pers->hot_remove_disk || add_journal) { 2701 /* If there is hot_add_disk but no hot_remove_disk 2702 * then added disks for geometry changes, 2703 * and should be added immediately. 2704 */ 2705 super_types[mddev->major_version]. 2706 validate_super(mddev, rdev); 2707 if (add_journal) 2708 mddev_suspend(mddev); 2709 err = mddev->pers->hot_add_disk(mddev, rdev); 2710 if (add_journal) 2711 mddev_resume(mddev); 2712 if (err) { 2713 md_kick_rdev_from_array(rdev); 2714 return err; 2715 } 2716 } 2717 sysfs_notify_dirent_safe(rdev->sysfs_state); 2718 2719 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2720 if (mddev->degraded) 2721 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2722 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2723 md_new_event(mddev); 2724 md_wakeup_thread(mddev->thread); 2725 return 0; 2726 } 2727 2728 /* words written to sysfs files may, or may not, be \n terminated. 2729 * We want to accept with case. For this we use cmd_match. 2730 */ 2731 static int cmd_match(const char *cmd, const char *str) 2732 { 2733 /* See if cmd, written into a sysfs file, matches 2734 * str. 
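	 * (Illustrative examples: cmd_match("remove\n", "remove") and
	 * cmd_match("remove", "remove") both return 1, while
	 * cmd_match("removed", "remove") returns 0.)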
They must either be the same, or cmd can 2735 * have a trailing newline 2736 */ 2737 while (*cmd && *str && *cmd == *str) { 2738 cmd++; 2739 str++; 2740 } 2741 if (*cmd == '\n') 2742 cmd++; 2743 if (*str || *cmd) 2744 return 0; 2745 return 1; 2746 } 2747 2748 struct rdev_sysfs_entry { 2749 struct attribute attr; 2750 ssize_t (*show)(struct md_rdev *, char *); 2751 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2752 }; 2753 2754 static ssize_t 2755 state_show(struct md_rdev *rdev, char *page) 2756 { 2757 char *sep = ","; 2758 size_t len = 0; 2759 unsigned long flags = READ_ONCE(rdev->flags); 2760 2761 if (test_bit(Faulty, &flags) || 2762 (!test_bit(ExternalBbl, &flags) && 2763 rdev->badblocks.unacked_exist)) 2764 len += sprintf(page+len, "faulty%s", sep); 2765 if (test_bit(In_sync, &flags)) 2766 len += sprintf(page+len, "in_sync%s", sep); 2767 if (test_bit(Journal, &flags)) 2768 len += sprintf(page+len, "journal%s", sep); 2769 if (test_bit(WriteMostly, &flags)) 2770 len += sprintf(page+len, "write_mostly%s", sep); 2771 if (test_bit(Blocked, &flags) || 2772 (rdev->badblocks.unacked_exist 2773 && !test_bit(Faulty, &flags))) 2774 len += sprintf(page+len, "blocked%s", sep); 2775 if (!test_bit(Faulty, &flags) && 2776 !test_bit(Journal, &flags) && 2777 !test_bit(In_sync, &flags)) 2778 len += sprintf(page+len, "spare%s", sep); 2779 if (test_bit(WriteErrorSeen, &flags)) 2780 len += sprintf(page+len, "write_error%s", sep); 2781 if (test_bit(WantReplacement, &flags)) 2782 len += sprintf(page+len, "want_replacement%s", sep); 2783 if (test_bit(Replacement, &flags)) 2784 len += sprintf(page+len, "replacement%s", sep); 2785 if (test_bit(ExternalBbl, &flags)) 2786 len += sprintf(page+len, "external_bbl%s", sep); 2787 if (test_bit(FailFast, &flags)) 2788 len += sprintf(page+len, "failfast%s", sep); 2789 2790 if (len) 2791 len -= strlen(sep); 2792 2793 return len+sprintf(page+len, "\n"); 2794 } 2795 2796 static ssize_t 2797 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2798 { 2799 /* can write 2800 * faulty - simulates an error 2801 * remove - disconnects the device 2802 * writemostly - sets write_mostly 2803 * -writemostly - clears write_mostly 2804 * blocked - sets the Blocked flags 2805 * -blocked - clears the Blocked and possibly simulates an error 2806 * insync - sets Insync providing device isn't active 2807 * -insync - clear Insync for a device with a slot assigned, 2808 * so that it gets rebuilt based on bitmap 2809 * write_error - sets WriteErrorSeen 2810 * -write_error - clears WriteErrorSeen 2811 * {,-}failfast - set/clear FailFast 2812 */ 2813 int err = -EINVAL; 2814 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2815 md_error(rdev->mddev, rdev); 2816 if (test_bit(Faulty, &rdev->flags)) 2817 err = 0; 2818 else 2819 err = -EBUSY; 2820 } else if (cmd_match(buf, "remove")) { 2821 if (rdev->mddev->pers) { 2822 clear_bit(Blocked, &rdev->flags); 2823 remove_and_add_spares(rdev->mddev, rdev); 2824 } 2825 if (rdev->raid_disk >= 0) 2826 err = -EBUSY; 2827 else { 2828 struct mddev *mddev = rdev->mddev; 2829 err = 0; 2830 if (mddev_is_clustered(mddev)) 2831 err = md_cluster_ops->remove_disk(mddev, rdev); 2832 2833 if (err == 0) { 2834 md_kick_rdev_from_array(rdev); 2835 if (mddev->pers) { 2836 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2837 md_wakeup_thread(mddev->thread); 2838 } 2839 md_new_event(mddev); 2840 } 2841 } 2842 } else if (cmd_match(buf, "writemostly")) { 2843 set_bit(WriteMostly, &rdev->flags); 2844 mddev_create_wb_pool(rdev->mddev, rdev, false); 2845 err = 
0; 2846 } else if (cmd_match(buf, "-writemostly")) { 2847 mddev_destroy_wb_pool(rdev->mddev, rdev); 2848 clear_bit(WriteMostly, &rdev->flags); 2849 err = 0; 2850 } else if (cmd_match(buf, "blocked")) { 2851 set_bit(Blocked, &rdev->flags); 2852 err = 0; 2853 } else if (cmd_match(buf, "-blocked")) { 2854 if (!test_bit(Faulty, &rdev->flags) && 2855 !test_bit(ExternalBbl, &rdev->flags) && 2856 rdev->badblocks.unacked_exist) { 2857 /* metadata handler doesn't understand badblocks, 2858 * so we need to fail the device 2859 */ 2860 md_error(rdev->mddev, rdev); 2861 } 2862 clear_bit(Blocked, &rdev->flags); 2863 clear_bit(BlockedBadBlocks, &rdev->flags); 2864 wake_up(&rdev->blocked_wait); 2865 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2866 md_wakeup_thread(rdev->mddev->thread); 2867 2868 err = 0; 2869 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2870 set_bit(In_sync, &rdev->flags); 2871 err = 0; 2872 } else if (cmd_match(buf, "failfast")) { 2873 set_bit(FailFast, &rdev->flags); 2874 err = 0; 2875 } else if (cmd_match(buf, "-failfast")) { 2876 clear_bit(FailFast, &rdev->flags); 2877 err = 0; 2878 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 2879 !test_bit(Journal, &rdev->flags)) { 2880 if (rdev->mddev->pers == NULL) { 2881 clear_bit(In_sync, &rdev->flags); 2882 rdev->saved_raid_disk = rdev->raid_disk; 2883 rdev->raid_disk = -1; 2884 err = 0; 2885 } 2886 } else if (cmd_match(buf, "write_error")) { 2887 set_bit(WriteErrorSeen, &rdev->flags); 2888 err = 0; 2889 } else if (cmd_match(buf, "-write_error")) { 2890 clear_bit(WriteErrorSeen, &rdev->flags); 2891 err = 0; 2892 } else if (cmd_match(buf, "want_replacement")) { 2893 /* Any non-spare device that is not a replacement can 2894 * become want_replacement at any time, but we then need to 2895 * check if recovery is needed. 2896 */ 2897 if (rdev->raid_disk >= 0 && 2898 !test_bit(Journal, &rdev->flags) && 2899 !test_bit(Replacement, &rdev->flags)) 2900 set_bit(WantReplacement, &rdev->flags); 2901 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2902 md_wakeup_thread(rdev->mddev->thread); 2903 err = 0; 2904 } else if (cmd_match(buf, "-want_replacement")) { 2905 /* Clearing 'want_replacement' is always allowed. 2906 * Once replacements starts it is too late though. 2907 */ 2908 err = 0; 2909 clear_bit(WantReplacement, &rdev->flags); 2910 } else if (cmd_match(buf, "replacement")) { 2911 /* Can only set a device as a replacement when array has not 2912 * yet been started. Once running, replacement is automatic 2913 * from spares, or by assigning 'slot'. 2914 */ 2915 if (rdev->mddev->pers) 2916 err = -EBUSY; 2917 else { 2918 set_bit(Replacement, &rdev->flags); 2919 err = 0; 2920 } 2921 } else if (cmd_match(buf, "-replacement")) { 2922 /* Similarly, can only clear Replacement before start */ 2923 if (rdev->mddev->pers) 2924 err = -EBUSY; 2925 else { 2926 clear_bit(Replacement, &rdev->flags); 2927 err = 0; 2928 } 2929 } else if (cmd_match(buf, "re-add")) { 2930 if (!rdev->mddev->pers) 2931 err = -EINVAL; 2932 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 2933 rdev->saved_raid_disk >= 0) { 2934 /* clear_bit is performed _after_ all the devices 2935 * have their local Faulty bit cleared. 
If any writes 2936 * happen in the meantime in the local node, they 2937 * will land in the local bitmap, which will be synced 2938 * by this node eventually 2939 */ 2940 if (!mddev_is_clustered(rdev->mddev) || 2941 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 2942 clear_bit(Faulty, &rdev->flags); 2943 err = add_bound_rdev(rdev); 2944 } 2945 } else 2946 err = -EBUSY; 2947 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 2948 set_bit(ExternalBbl, &rdev->flags); 2949 rdev->badblocks.shift = 0; 2950 err = 0; 2951 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 2952 clear_bit(ExternalBbl, &rdev->flags); 2953 err = 0; 2954 } 2955 if (!err) 2956 sysfs_notify_dirent_safe(rdev->sysfs_state); 2957 return err ? err : len; 2958 } 2959 static struct rdev_sysfs_entry rdev_state = 2960 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 2961 2962 static ssize_t 2963 errors_show(struct md_rdev *rdev, char *page) 2964 { 2965 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2966 } 2967 2968 static ssize_t 2969 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 2970 { 2971 unsigned int n; 2972 int rv; 2973 2974 rv = kstrtouint(buf, 10, &n); 2975 if (rv < 0) 2976 return rv; 2977 atomic_set(&rdev->corrected_errors, n); 2978 return len; 2979 } 2980 static struct rdev_sysfs_entry rdev_errors = 2981 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2982 2983 static ssize_t 2984 slot_show(struct md_rdev *rdev, char *page) 2985 { 2986 if (test_bit(Journal, &rdev->flags)) 2987 return sprintf(page, "journal\n"); 2988 else if (rdev->raid_disk < 0) 2989 return sprintf(page, "none\n"); 2990 else 2991 return sprintf(page, "%d\n", rdev->raid_disk); 2992 } 2993 2994 static ssize_t 2995 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 2996 { 2997 int slot; 2998 int err; 2999 3000 if (test_bit(Journal, &rdev->flags)) 3001 return -EBUSY; 3002 if (strncmp(buf, "none", 4)==0) 3003 slot = -1; 3004 else { 3005 err = kstrtouint(buf, 10, (unsigned int *)&slot); 3006 if (err < 0) 3007 return err; 3008 } 3009 if (rdev->mddev->pers && slot == -1) { 3010 /* Setting 'slot' on an active array requires also 3011 * updating the 'rd%d' link, and communicating 3012 * with the personality with ->hot_*_disk. 3013 * For now we only support removing 3014 * failed/spare devices. This normally happens automatically, 3015 * but not when the metadata is externally managed. 3016 */ 3017 if (rdev->raid_disk == -1) 3018 return -EEXIST; 3019 /* personality does all needed checks */ 3020 if (rdev->mddev->pers->hot_remove_disk == NULL) 3021 return -EINVAL; 3022 clear_bit(Blocked, &rdev->flags); 3023 remove_and_add_spares(rdev->mddev, rdev); 3024 if (rdev->raid_disk >= 0) 3025 return -EBUSY; 3026 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3027 md_wakeup_thread(rdev->mddev->thread); 3028 } else if (rdev->mddev->pers) { 3029 /* Activating a spare .. or possibly reactivating 3030 * if we ever get bitmaps working here. 
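		 * A hedged usage illustration: with externally managed
		 * metadata, writing a slot number such as "2" to this
		 * attribute asks the personality to place the device at
		 * that slot via ->hot_add_disk() below, while writing
		 * "none" goes through the removal path handled above.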
3031 */ 3032 int err; 3033 3034 if (rdev->raid_disk != -1) 3035 return -EBUSY; 3036 3037 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 3038 return -EBUSY; 3039 3040 if (rdev->mddev->pers->hot_add_disk == NULL) 3041 return -EINVAL; 3042 3043 if (slot >= rdev->mddev->raid_disks && 3044 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3045 return -ENOSPC; 3046 3047 rdev->raid_disk = slot; 3048 if (test_bit(In_sync, &rdev->flags)) 3049 rdev->saved_raid_disk = slot; 3050 else 3051 rdev->saved_raid_disk = -1; 3052 clear_bit(In_sync, &rdev->flags); 3053 clear_bit(Bitmap_sync, &rdev->flags); 3054 err = rdev->mddev->pers-> 3055 hot_add_disk(rdev->mddev, rdev); 3056 if (err) { 3057 rdev->raid_disk = -1; 3058 return err; 3059 } else 3060 sysfs_notify_dirent_safe(rdev->sysfs_state); 3061 if (sysfs_link_rdev(rdev->mddev, rdev)) 3062 /* failure here is OK */; 3063 /* don't wakeup anyone, leave that to userspace. */ 3064 } else { 3065 if (slot >= rdev->mddev->raid_disks && 3066 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3067 return -ENOSPC; 3068 rdev->raid_disk = slot; 3069 /* assume it is working */ 3070 clear_bit(Faulty, &rdev->flags); 3071 clear_bit(WriteMostly, &rdev->flags); 3072 set_bit(In_sync, &rdev->flags); 3073 sysfs_notify_dirent_safe(rdev->sysfs_state); 3074 } 3075 return len; 3076 } 3077 3078 static struct rdev_sysfs_entry rdev_slot = 3079 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3080 3081 static ssize_t 3082 offset_show(struct md_rdev *rdev, char *page) 3083 { 3084 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3085 } 3086 3087 static ssize_t 3088 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3089 { 3090 unsigned long long offset; 3091 if (kstrtoull(buf, 10, &offset) < 0) 3092 return -EINVAL; 3093 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3094 return -EBUSY; 3095 if (rdev->sectors && rdev->mddev->external) 3096 /* Must set offset before size, so overlap checks 3097 * can be sane */ 3098 return -EBUSY; 3099 rdev->data_offset = offset; 3100 rdev->new_data_offset = offset; 3101 return len; 3102 } 3103 3104 static struct rdev_sysfs_entry rdev_offset = 3105 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3106 3107 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3108 { 3109 return sprintf(page, "%llu\n", 3110 (unsigned long long)rdev->new_data_offset); 3111 } 3112 3113 static ssize_t new_offset_store(struct md_rdev *rdev, 3114 const char *buf, size_t len) 3115 { 3116 unsigned long long new_offset; 3117 struct mddev *mddev = rdev->mddev; 3118 3119 if (kstrtoull(buf, 10, &new_offset) < 0) 3120 return -EINVAL; 3121 3122 if (mddev->sync_thread || 3123 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 3124 return -EBUSY; 3125 if (new_offset == rdev->data_offset) 3126 /* reset is always permitted */ 3127 ; 3128 else if (new_offset > rdev->data_offset) { 3129 /* must not push array size beyond rdev_sectors */ 3130 if (new_offset - rdev->data_offset 3131 + mddev->dev_sectors > rdev->sectors) 3132 return -E2BIG; 3133 } 3134 /* Metadata worries about other space details. */ 3135 3136 /* decreasing the offset is inconsistent with a backwards 3137 * reshape. 3138 */ 3139 if (new_offset < rdev->data_offset && 3140 mddev->reshape_backwards) 3141 return -EINVAL; 3142 /* Increasing offset is inconsistent with forwards 3143 * reshape. reshape_direction should be set to 3144 * 'backwards' first. 
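	 * (Illustration: raising new_offset above data_offset only makes
	 * sense for a backwards reshape, so the expected sequence is to
	 * write "backwards" to the array's reshape_direction attribute
	 * first and only then store the larger offset here; the
	 * assignments below keep reshape_backwards consistent with the
	 * offset that was chosen.)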
	 */
	if (new_offset > rdev->data_offset &&
	    !mddev->reshape_backwards)
		return -EINVAL;

	if (mddev->pers && mddev->persistent &&
	    !super_types[mddev->major_version]
	    .allow_new_offset(rdev, new_offset))
		return -E2BIG;
	rdev->new_data_offset = new_offset;
	if (new_offset > rdev->data_offset)
		mddev->reshape_backwards = 1;
	else if (new_offset < rdev->data_offset)
		mddev->reshape_backwards = 0;

	return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);

static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (kstrtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}

static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	struct mddev *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (test_bit(Journal, &rdev->flags))
		return -EBUSY;
	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (rdev->data_offset != rdev->new_data_offset)
		return -EINVAL; /* too confusing */
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
				rdev->data_offset;
		if (!my_mddev->pers->resize)
			/* Cannot change size for RAID0 or Linear etc */
			return -EINVAL;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* Need to check that all other rdevs with the same
		 * ->bdev do not overlap.  'rcu' is sufficient to walk
		 * the rdev lists safely.
		 * This check does not provide a hard guarantee, it
		 * just helps avoid dangerous mistakes.
		 */
		struct mddev *mddev;
		int overlap = 0;
		struct list_head *tmp;

		rcu_read_lock();
		for_each_mddev(mddev, tmp) {
			struct md_rdev *rdev2;

			rdev_for_each(rdev2, mddev)
				if (rdev->bdev == rdev2->bdev &&
				    rdev != rdev2 &&
				    overlaps(rdev->data_offset, rdev->sectors,
					     rdev2->data_offset,
					     rdev2->sectors)) {
					overlap = 1;
					break;
				}
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		rcu_read_unlock();
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
3263 * We put oldsectors back because we *know* it is 3264 * safe, and trust userspace not to race with 3265 * itself 3266 */ 3267 rdev->sectors = oldsectors; 3268 return -EBUSY; 3269 } 3270 } 3271 return len; 3272 } 3273 3274 static struct rdev_sysfs_entry rdev_size = 3275 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 3276 3277 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 3278 { 3279 unsigned long long recovery_start = rdev->recovery_offset; 3280 3281 if (test_bit(In_sync, &rdev->flags) || 3282 recovery_start == MaxSector) 3283 return sprintf(page, "none\n"); 3284 3285 return sprintf(page, "%llu\n", recovery_start); 3286 } 3287 3288 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 3289 { 3290 unsigned long long recovery_start; 3291 3292 if (cmd_match(buf, "none")) 3293 recovery_start = MaxSector; 3294 else if (kstrtoull(buf, 10, &recovery_start)) 3295 return -EINVAL; 3296 3297 if (rdev->mddev->pers && 3298 rdev->raid_disk >= 0) 3299 return -EBUSY; 3300 3301 rdev->recovery_offset = recovery_start; 3302 if (recovery_start == MaxSector) 3303 set_bit(In_sync, &rdev->flags); 3304 else 3305 clear_bit(In_sync, &rdev->flags); 3306 return len; 3307 } 3308 3309 static struct rdev_sysfs_entry rdev_recovery_start = 3310 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 3311 3312 /* sysfs access to bad-blocks list. 3313 * We present two files. 3314 * 'bad-blocks' lists sector numbers and lengths of ranges that 3315 * are recorded as bad. The list is truncated to fit within 3316 * the one-page limit of sysfs. 3317 * Writing "sector length" to this file adds an acknowledged 3318 * bad block list. 3319 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet 3320 * been acknowledged. Writing to this file adds bad blocks 3321 * without acknowledging them. This is largely for testing. 
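 * An illustrative write (values made up): storing the string "4096 8"
 * records an 8-sector bad range starting at sector 4096, acknowledged
 * when written through 'bad-blocks' and unacknowledged when written
 * through the other file.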
 */
static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
	/* Maybe that ack was all we needed */
	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
		wake_up(&rdev->blocked_wait);
	return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);

static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);

static ssize_t
ppl_sector_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
}

static ssize_t
ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long sector;

	if (kstrtoull(buf, 10, &sector) < 0)
		return -EINVAL;
	if (sector != (sector_t)sector)
		return -EINVAL;

	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	if (rdev->mddev->persistent) {
		if (rdev->mddev->major_version == 0)
			return -EINVAL;
		if ((sector > rdev->sb_start &&
		     sector - rdev->sb_start > S16_MAX) ||
		    (sector < rdev->sb_start &&
		     rdev->sb_start - sector > -S16_MIN))
			return -EINVAL;
		rdev->ppl.offset = sector - rdev->sb_start;
	} else if (!rdev->mddev->external) {
		return -EBUSY;
	}
	rdev->ppl.sector = sector;
	return len;
}

static struct rdev_sysfs_entry rdev_ppl_sector =
__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);

static ssize_t
ppl_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%u\n", rdev->ppl.size);
}

static ssize_t
ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned int size;

	if (kstrtouint(buf, 10, &size) < 0)
		return -EINVAL;

	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	if (rdev->mddev->persistent) {
		if (rdev->mddev->major_version == 0)
			return -EINVAL;
		if (size > U16_MAX)
			return -EINVAL;
	} else if (!rdev->mddev->external) {
		return -EBUSY;
	}
	rdev->ppl.size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_ppl_size =
__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);

static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_new_offset.attr,
	&rdev_size.attr,
	&rdev_recovery_start.attr,
	&rdev_bad_blocks.attr,
	&rdev_unack_bad_blocks.attr,
	&rdev_ppl_sector.attr,
	&rdev_ppl_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct
rdev_sysfs_entry, attr); 3439 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3440 3441 if (!entry->show) 3442 return -EIO; 3443 if (!rdev->mddev) 3444 return -ENODEV; 3445 return entry->show(rdev, page); 3446 } 3447 3448 static ssize_t 3449 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3450 const char *page, size_t length) 3451 { 3452 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3453 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3454 ssize_t rv; 3455 struct mddev *mddev = rdev->mddev; 3456 3457 if (!entry->store) 3458 return -EIO; 3459 if (!capable(CAP_SYS_ADMIN)) 3460 return -EACCES; 3461 rv = mddev ? mddev_lock(mddev) : -ENODEV; 3462 if (!rv) { 3463 if (rdev->mddev == NULL) 3464 rv = -ENODEV; 3465 else 3466 rv = entry->store(rdev, page, length); 3467 mddev_unlock(mddev); 3468 } 3469 return rv; 3470 } 3471 3472 static void rdev_free(struct kobject *ko) 3473 { 3474 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3475 kfree(rdev); 3476 } 3477 static const struct sysfs_ops rdev_sysfs_ops = { 3478 .show = rdev_attr_show, 3479 .store = rdev_attr_store, 3480 }; 3481 static struct kobj_type rdev_ktype = { 3482 .release = rdev_free, 3483 .sysfs_ops = &rdev_sysfs_ops, 3484 .default_attrs = rdev_default_attrs, 3485 }; 3486 3487 int md_rdev_init(struct md_rdev *rdev) 3488 { 3489 rdev->desc_nr = -1; 3490 rdev->saved_raid_disk = -1; 3491 rdev->raid_disk = -1; 3492 rdev->flags = 0; 3493 rdev->data_offset = 0; 3494 rdev->new_data_offset = 0; 3495 rdev->sb_events = 0; 3496 rdev->last_read_error = 0; 3497 rdev->sb_loaded = 0; 3498 rdev->bb_page = NULL; 3499 atomic_set(&rdev->nr_pending, 0); 3500 atomic_set(&rdev->read_errors, 0); 3501 atomic_set(&rdev->corrected_errors, 0); 3502 3503 INIT_LIST_HEAD(&rdev->same_set); 3504 init_waitqueue_head(&rdev->blocked_wait); 3505 3506 /* Add space to store bad block list. 3507 * This reserves the space even on arrays where it cannot 3508 * be used - I wonder if that matters 3509 */ 3510 return badblocks_init(&rdev->badblocks, 0); 3511 } 3512 EXPORT_SYMBOL_GPL(md_rdev_init); 3513 /* 3514 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3515 * 3516 * mark the device faulty if: 3517 * 3518 * - the device is nonexistent (zero size) 3519 * - the device has no valid superblock 3520 * 3521 * a faulty rdev _never_ has rdev->sb set. 3522 */ 3523 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3524 { 3525 char b[BDEVNAME_SIZE]; 3526 int err; 3527 struct md_rdev *rdev; 3528 sector_t size; 3529 3530 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3531 if (!rdev) 3532 return ERR_PTR(-ENOMEM); 3533 3534 err = md_rdev_init(rdev); 3535 if (err) 3536 goto abort_free; 3537 err = alloc_disk_sb(rdev); 3538 if (err) 3539 goto abort_free; 3540 3541 err = lock_rdev(rdev, newdev, super_format == -2); 3542 if (err) 3543 goto abort_free; 3544 3545 kobject_init(&rdev->kobj, &rdev_ktype); 3546 3547 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3548 if (!size) { 3549 pr_warn("md: %s has zero or unknown size, marking faulty!\n", 3550 bdevname(rdev->bdev,b)); 3551 err = -EINVAL; 3552 goto abort_free; 3553 } 3554 3555 if (super_format >= 0) { 3556 err = super_types[super_format]. 
3557 load_super(rdev, NULL, super_minor); 3558 if (err == -EINVAL) { 3559 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n", 3560 bdevname(rdev->bdev,b), 3561 super_format, super_minor); 3562 goto abort_free; 3563 } 3564 if (err < 0) { 3565 pr_warn("md: could not read %s's sb, not importing!\n", 3566 bdevname(rdev->bdev,b)); 3567 goto abort_free; 3568 } 3569 } 3570 3571 return rdev; 3572 3573 abort_free: 3574 if (rdev->bdev) 3575 unlock_rdev(rdev); 3576 md_rdev_clear(rdev); 3577 kfree(rdev); 3578 return ERR_PTR(err); 3579 } 3580 3581 /* 3582 * Check a full RAID array for plausibility 3583 */ 3584 3585 static void analyze_sbs(struct mddev *mddev) 3586 { 3587 int i; 3588 struct md_rdev *rdev, *freshest, *tmp; 3589 char b[BDEVNAME_SIZE]; 3590 3591 freshest = NULL; 3592 rdev_for_each_safe(rdev, tmp, mddev) 3593 switch (super_types[mddev->major_version]. 3594 load_super(rdev, freshest, mddev->minor_version)) { 3595 case 1: 3596 freshest = rdev; 3597 break; 3598 case 0: 3599 break; 3600 default: 3601 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n", 3602 bdevname(rdev->bdev,b)); 3603 md_kick_rdev_from_array(rdev); 3604 } 3605 3606 super_types[mddev->major_version]. 3607 validate_super(mddev, freshest); 3608 3609 i = 0; 3610 rdev_for_each_safe(rdev, tmp, mddev) { 3611 if (mddev->max_disks && 3612 (rdev->desc_nr >= mddev->max_disks || 3613 i > mddev->max_disks)) { 3614 pr_warn("md: %s: %s: only %d devices permitted\n", 3615 mdname(mddev), bdevname(rdev->bdev, b), 3616 mddev->max_disks); 3617 md_kick_rdev_from_array(rdev); 3618 continue; 3619 } 3620 if (rdev != freshest) { 3621 if (super_types[mddev->major_version]. 3622 validate_super(mddev, rdev)) { 3623 pr_warn("md: kicking non-fresh %s from array!\n", 3624 bdevname(rdev->bdev,b)); 3625 md_kick_rdev_from_array(rdev); 3626 continue; 3627 } 3628 } 3629 if (mddev->level == LEVEL_MULTIPATH) { 3630 rdev->desc_nr = i++; 3631 rdev->raid_disk = rdev->desc_nr; 3632 set_bit(In_sync, &rdev->flags); 3633 } else if (rdev->raid_disk >= 3634 (mddev->raid_disks - min(0, mddev->delta_disks)) && 3635 !test_bit(Journal, &rdev->flags)) { 3636 rdev->raid_disk = -1; 3637 clear_bit(In_sync, &rdev->flags); 3638 } 3639 } 3640 } 3641 3642 /* Read a fixed-point number. 3643 * Numbers in sysfs attributes should be in "standard" units where 3644 * possible, so time should be in seconds. 3645 * However we internally use a a much smaller unit such as 3646 * milliseconds or jiffies. 3647 * This function takes a decimal number with a possible fractional 3648 * component, and produces an integer which is the result of 3649 * multiplying that number by 10^'scale'. 3650 * all without any floating-point arithmetic. 3651 */ 3652 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 3653 { 3654 unsigned long result = 0; 3655 long decimals = -1; 3656 while (isdigit(*cp) || (*cp == '.' 
&& decimals < 0)) { 3657 if (*cp == '.') 3658 decimals = 0; 3659 else if (decimals < scale) { 3660 unsigned int value; 3661 value = *cp - '0'; 3662 result = result * 10 + value; 3663 if (decimals >= 0) 3664 decimals++; 3665 } 3666 cp++; 3667 } 3668 if (*cp == '\n') 3669 cp++; 3670 if (*cp) 3671 return -EINVAL; 3672 if (decimals < 0) 3673 decimals = 0; 3674 *res = result * int_pow(10, scale - decimals); 3675 return 0; 3676 } 3677 3678 static ssize_t 3679 safe_delay_show(struct mddev *mddev, char *page) 3680 { 3681 int msec = (mddev->safemode_delay*1000)/HZ; 3682 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3683 } 3684 static ssize_t 3685 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3686 { 3687 unsigned long msec; 3688 3689 if (mddev_is_clustered(mddev)) { 3690 pr_warn("md: Safemode is disabled for clustered mode\n"); 3691 return -EINVAL; 3692 } 3693 3694 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3695 return -EINVAL; 3696 if (msec == 0) 3697 mddev->safemode_delay = 0; 3698 else { 3699 unsigned long old_delay = mddev->safemode_delay; 3700 unsigned long new_delay = (msec*HZ)/1000; 3701 3702 if (new_delay == 0) 3703 new_delay = 1; 3704 mddev->safemode_delay = new_delay; 3705 if (new_delay < old_delay || old_delay == 0) 3706 mod_timer(&mddev->safemode_timer, jiffies+1); 3707 } 3708 return len; 3709 } 3710 static struct md_sysfs_entry md_safe_delay = 3711 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3712 3713 static ssize_t 3714 level_show(struct mddev *mddev, char *page) 3715 { 3716 struct md_personality *p; 3717 int ret; 3718 spin_lock(&mddev->lock); 3719 p = mddev->pers; 3720 if (p) 3721 ret = sprintf(page, "%s\n", p->name); 3722 else if (mddev->clevel[0]) 3723 ret = sprintf(page, "%s\n", mddev->clevel); 3724 else if (mddev->level != LEVEL_NONE) 3725 ret = sprintf(page, "%d\n", mddev->level); 3726 else 3727 ret = 0; 3728 spin_unlock(&mddev->lock); 3729 return ret; 3730 } 3731 3732 static ssize_t 3733 level_store(struct mddev *mddev, const char *buf, size_t len) 3734 { 3735 char clevel[16]; 3736 ssize_t rv; 3737 size_t slen = len; 3738 struct md_personality *pers, *oldpers; 3739 long level; 3740 void *priv, *oldpriv; 3741 struct md_rdev *rdev; 3742 3743 if (slen == 0 || slen >= sizeof(clevel)) 3744 return -EINVAL; 3745 3746 rv = mddev_lock(mddev); 3747 if (rv) 3748 return rv; 3749 3750 if (mddev->pers == NULL) { 3751 strncpy(mddev->clevel, buf, slen); 3752 if (mddev->clevel[slen-1] == '\n') 3753 slen--; 3754 mddev->clevel[slen] = 0; 3755 mddev->level = LEVEL_NONE; 3756 rv = len; 3757 goto out_unlock; 3758 } 3759 rv = -EROFS; 3760 if (mddev->ro) 3761 goto out_unlock; 3762 3763 /* request to change the personality. Need to ensure: 3764 * - array is not engaged in resync/recovery/reshape 3765 * - old personality can be suspended 3766 * - new personality will access other array. 
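	 * An illustrative example: writing "raid6" to the level attribute
	 * of a running raid5 array loads the md-raid6 module alias if
	 * necessary and then asks that personality to adopt the array via
	 * ->takeover() further down.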
3767 */ 3768 3769 rv = -EBUSY; 3770 if (mddev->sync_thread || 3771 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3772 mddev->reshape_position != MaxSector || 3773 mddev->sysfs_active) 3774 goto out_unlock; 3775 3776 rv = -EINVAL; 3777 if (!mddev->pers->quiesce) { 3778 pr_warn("md: %s: %s does not support online personality change\n", 3779 mdname(mddev), mddev->pers->name); 3780 goto out_unlock; 3781 } 3782 3783 /* Now find the new personality */ 3784 strncpy(clevel, buf, slen); 3785 if (clevel[slen-1] == '\n') 3786 slen--; 3787 clevel[slen] = 0; 3788 if (kstrtol(clevel, 10, &level)) 3789 level = LEVEL_NONE; 3790 3791 if (request_module("md-%s", clevel) != 0) 3792 request_module("md-level-%s", clevel); 3793 spin_lock(&pers_lock); 3794 pers = find_pers(level, clevel); 3795 if (!pers || !try_module_get(pers->owner)) { 3796 spin_unlock(&pers_lock); 3797 pr_warn("md: personality %s not loaded\n", clevel); 3798 rv = -EINVAL; 3799 goto out_unlock; 3800 } 3801 spin_unlock(&pers_lock); 3802 3803 if (pers == mddev->pers) { 3804 /* Nothing to do! */ 3805 module_put(pers->owner); 3806 rv = len; 3807 goto out_unlock; 3808 } 3809 if (!pers->takeover) { 3810 module_put(pers->owner); 3811 pr_warn("md: %s: %s does not support personality takeover\n", 3812 mdname(mddev), clevel); 3813 rv = -EINVAL; 3814 goto out_unlock; 3815 } 3816 3817 rdev_for_each(rdev, mddev) 3818 rdev->new_raid_disk = rdev->raid_disk; 3819 3820 /* ->takeover must set new_* and/or delta_disks 3821 * if it succeeds, and may set them when it fails. 3822 */ 3823 priv = pers->takeover(mddev); 3824 if (IS_ERR(priv)) { 3825 mddev->new_level = mddev->level; 3826 mddev->new_layout = mddev->layout; 3827 mddev->new_chunk_sectors = mddev->chunk_sectors; 3828 mddev->raid_disks -= mddev->delta_disks; 3829 mddev->delta_disks = 0; 3830 mddev->reshape_backwards = 0; 3831 module_put(pers->owner); 3832 pr_warn("md: %s: %s would not accept array\n", 3833 mdname(mddev), clevel); 3834 rv = PTR_ERR(priv); 3835 goto out_unlock; 3836 } 3837 3838 /* Looks like we have a winner */ 3839 mddev_suspend(mddev); 3840 mddev_detach(mddev); 3841 3842 spin_lock(&mddev->lock); 3843 oldpers = mddev->pers; 3844 oldpriv = mddev->private; 3845 mddev->pers = pers; 3846 mddev->private = priv; 3847 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3848 mddev->level = mddev->new_level; 3849 mddev->layout = mddev->new_layout; 3850 mddev->chunk_sectors = mddev->new_chunk_sectors; 3851 mddev->delta_disks = 0; 3852 mddev->reshape_backwards = 0; 3853 mddev->degraded = 0; 3854 spin_unlock(&mddev->lock); 3855 3856 if (oldpers->sync_request == NULL && 3857 mddev->external) { 3858 /* We are converting from a no-redundancy array 3859 * to a redundancy array and metadata is managed 3860 * externally so we need to be sure that writes 3861 * won't block due to a need to transition 3862 * clean->dirty 3863 * until external management is started. 
3864 */ 3865 mddev->in_sync = 0; 3866 mddev->safemode_delay = 0; 3867 mddev->safemode = 0; 3868 } 3869 3870 oldpers->free(mddev, oldpriv); 3871 3872 if (oldpers->sync_request == NULL && 3873 pers->sync_request != NULL) { 3874 /* need to add the md_redundancy_group */ 3875 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3876 pr_warn("md: cannot register extra attributes for %s\n", 3877 mdname(mddev)); 3878 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 3879 } 3880 if (oldpers->sync_request != NULL && 3881 pers->sync_request == NULL) { 3882 /* need to remove the md_redundancy_group */ 3883 if (mddev->to_remove == NULL) 3884 mddev->to_remove = &md_redundancy_group; 3885 } 3886 3887 module_put(oldpers->owner); 3888 3889 rdev_for_each(rdev, mddev) { 3890 if (rdev->raid_disk < 0) 3891 continue; 3892 if (rdev->new_raid_disk >= mddev->raid_disks) 3893 rdev->new_raid_disk = -1; 3894 if (rdev->new_raid_disk == rdev->raid_disk) 3895 continue; 3896 sysfs_unlink_rdev(mddev, rdev); 3897 } 3898 rdev_for_each(rdev, mddev) { 3899 if (rdev->raid_disk < 0) 3900 continue; 3901 if (rdev->new_raid_disk == rdev->raid_disk) 3902 continue; 3903 rdev->raid_disk = rdev->new_raid_disk; 3904 if (rdev->raid_disk < 0) 3905 clear_bit(In_sync, &rdev->flags); 3906 else { 3907 if (sysfs_link_rdev(mddev, rdev)) 3908 pr_warn("md: cannot register rd%d for %s after level change\n", 3909 rdev->raid_disk, mdname(mddev)); 3910 } 3911 } 3912 3913 if (pers->sync_request == NULL) { 3914 /* this is now an array without redundancy, so 3915 * it must always be in_sync 3916 */ 3917 mddev->in_sync = 1; 3918 del_timer_sync(&mddev->safemode_timer); 3919 } 3920 blk_set_stacking_limits(&mddev->queue->limits); 3921 pers->run(mddev); 3922 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 3923 mddev_resume(mddev); 3924 if (!mddev->thread) 3925 md_update_sb(mddev, 1); 3926 sysfs_notify(&mddev->kobj, NULL, "level"); 3927 md_new_event(mddev); 3928 rv = len; 3929 out_unlock: 3930 mddev_unlock(mddev); 3931 return rv; 3932 } 3933 3934 static struct md_sysfs_entry md_level = 3935 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3936 3937 static ssize_t 3938 layout_show(struct mddev *mddev, char *page) 3939 { 3940 /* just a number, not meaningful for all levels */ 3941 if (mddev->reshape_position != MaxSector && 3942 mddev->layout != mddev->new_layout) 3943 return sprintf(page, "%d (%d)\n", 3944 mddev->new_layout, mddev->layout); 3945 return sprintf(page, "%d\n", mddev->layout); 3946 } 3947 3948 static ssize_t 3949 layout_store(struct mddev *mddev, const char *buf, size_t len) 3950 { 3951 unsigned int n; 3952 int err; 3953 3954 err = kstrtouint(buf, 10, &n); 3955 if (err < 0) 3956 return err; 3957 err = mddev_lock(mddev); 3958 if (err) 3959 return err; 3960 3961 if (mddev->pers) { 3962 if (mddev->pers->check_reshape == NULL) 3963 err = -EBUSY; 3964 else if (mddev->ro) 3965 err = -EROFS; 3966 else { 3967 mddev->new_layout = n; 3968 err = mddev->pers->check_reshape(mddev); 3969 if (err) 3970 mddev->new_layout = mddev->layout; 3971 } 3972 } else { 3973 mddev->new_layout = n; 3974 if (mddev->reshape_position == MaxSector) 3975 mddev->layout = n; 3976 } 3977 mddev_unlock(mddev); 3978 return err ?: len; 3979 } 3980 static struct md_sysfs_entry md_layout = 3981 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3982 3983 static ssize_t 3984 raid_disks_show(struct mddev *mddev, char *page) 3985 { 3986 if (mddev->raid_disks == 0) 3987 return 0; 3988 if (mddev->reshape_position != MaxSector && 3989 
mddev->delta_disks != 0) 3990 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3991 mddev->raid_disks - mddev->delta_disks); 3992 return sprintf(page, "%d\n", mddev->raid_disks); 3993 } 3994 3995 static int update_raid_disks(struct mddev *mddev, int raid_disks); 3996 3997 static ssize_t 3998 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3999 { 4000 unsigned int n; 4001 int err; 4002 4003 err = kstrtouint(buf, 10, &n); 4004 if (err < 0) 4005 return err; 4006 4007 err = mddev_lock(mddev); 4008 if (err) 4009 return err; 4010 if (mddev->pers) 4011 err = update_raid_disks(mddev, n); 4012 else if (mddev->reshape_position != MaxSector) { 4013 struct md_rdev *rdev; 4014 int olddisks = mddev->raid_disks - mddev->delta_disks; 4015 4016 err = -EINVAL; 4017 rdev_for_each(rdev, mddev) { 4018 if (olddisks < n && 4019 rdev->data_offset < rdev->new_data_offset) 4020 goto out_unlock; 4021 if (olddisks > n && 4022 rdev->data_offset > rdev->new_data_offset) 4023 goto out_unlock; 4024 } 4025 err = 0; 4026 mddev->delta_disks = n - olddisks; 4027 mddev->raid_disks = n; 4028 mddev->reshape_backwards = (mddev->delta_disks < 0); 4029 } else 4030 mddev->raid_disks = n; 4031 out_unlock: 4032 mddev_unlock(mddev); 4033 return err ? err : len; 4034 } 4035 static struct md_sysfs_entry md_raid_disks = 4036 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 4037 4038 static ssize_t 4039 chunk_size_show(struct mddev *mddev, char *page) 4040 { 4041 if (mddev->reshape_position != MaxSector && 4042 mddev->chunk_sectors != mddev->new_chunk_sectors) 4043 return sprintf(page, "%d (%d)\n", 4044 mddev->new_chunk_sectors << 9, 4045 mddev->chunk_sectors << 9); 4046 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 4047 } 4048 4049 static ssize_t 4050 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 4051 { 4052 unsigned long n; 4053 int err; 4054 4055 err = kstrtoul(buf, 10, &n); 4056 if (err < 0) 4057 return err; 4058 4059 err = mddev_lock(mddev); 4060 if (err) 4061 return err; 4062 if (mddev->pers) { 4063 if (mddev->pers->check_reshape == NULL) 4064 err = -EBUSY; 4065 else if (mddev->ro) 4066 err = -EROFS; 4067 else { 4068 mddev->new_chunk_sectors = n >> 9; 4069 err = mddev->pers->check_reshape(mddev); 4070 if (err) 4071 mddev->new_chunk_sectors = mddev->chunk_sectors; 4072 } 4073 } else { 4074 mddev->new_chunk_sectors = n >> 9; 4075 if (mddev->reshape_position == MaxSector) 4076 mddev->chunk_sectors = n >> 9; 4077 } 4078 mddev_unlock(mddev); 4079 return err ?: len; 4080 } 4081 static struct md_sysfs_entry md_chunk_size = 4082 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 4083 4084 static ssize_t 4085 resync_start_show(struct mddev *mddev, char *page) 4086 { 4087 if (mddev->recovery_cp == MaxSector) 4088 return sprintf(page, "none\n"); 4089 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 4090 } 4091 4092 static ssize_t 4093 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 4094 { 4095 unsigned long long n; 4096 int err; 4097 4098 if (cmd_match(buf, "none")) 4099 n = MaxSector; 4100 else { 4101 err = kstrtoull(buf, 10, &n); 4102 if (err < 0) 4103 return err; 4104 if (n != (sector_t)n) 4105 return -EINVAL; 4106 } 4107 4108 err = mddev_lock(mddev); 4109 if (err) 4110 return err; 4111 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4112 err = -EBUSY; 4113 4114 if (!err) { 4115 mddev->recovery_cp = n; 4116 if (mddev->pers) 4117 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4118 
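		/* when the array is active, the updated checkpoint reaches the
		 * on-disk superblock via the metadata update requested above */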
} 4119 mddev_unlock(mddev); 4120 return err ?: len; 4121 } 4122 static struct md_sysfs_entry md_resync_start = 4123 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 4124 resync_start_show, resync_start_store); 4125 4126 /* 4127 * The array state can be: 4128 * 4129 * clear 4130 * No devices, no size, no level 4131 * Equivalent to STOP_ARRAY ioctl 4132 * inactive 4133 * May have some settings, but array is not active 4134 * all IO results in error 4135 * When written, doesn't tear down array, but just stops it 4136 * suspended (not supported yet) 4137 * All IO requests will block. The array can be reconfigured. 4138 * Writing this, if accepted, will block until array is quiescent 4139 * readonly 4140 * no resync can happen. no superblocks get written. 4141 * write requests fail 4142 * read-auto 4143 * like readonly, but behaves like 'clean' on a write request. 4144 * 4145 * clean - no pending writes, but otherwise active. 4146 * When written to inactive array, starts without resync 4147 * If a write request arrives then 4148 * if metadata is known, mark 'dirty' and switch to 'active'. 4149 * if not known, block and switch to write-pending 4150 * If written to an active array that has pending writes, then fails. 4151 * active 4152 * fully active: IO and resync can be happening. 4153 * When written to inactive array, starts with resync 4154 * 4155 * write-pending 4156 * clean, but writes are blocked waiting for 'active' to be written. 4157 * 4158 * active-idle 4159 * like active, but no writes have been seen for a while (100msec). 4160 * 4161 */ 4162 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4163 write_pending, active_idle, bad_word}; 4164 static char *array_states[] = { 4165 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4166 "write-pending", "active-idle", NULL }; 4167 4168 static int match_word(const char *word, char **list) 4169 { 4170 int n; 4171 for (n=0; list[n]; n++) 4172 if (cmd_match(word, list[n])) 4173 break; 4174 return n; 4175 } 4176 4177 static ssize_t 4178 array_state_show(struct mddev *mddev, char *page) 4179 { 4180 enum array_state st = inactive; 4181 4182 if (mddev->pers) 4183 switch(mddev->ro) { 4184 case 1: 4185 st = readonly; 4186 break; 4187 case 2: 4188 st = read_auto; 4189 break; 4190 case 0: 4191 spin_lock(&mddev->lock); 4192 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4193 st = write_pending; 4194 else if (mddev->in_sync) 4195 st = clean; 4196 else if (mddev->safemode) 4197 st = active_idle; 4198 else 4199 st = active; 4200 spin_unlock(&mddev->lock); 4201 } 4202 else { 4203 if (list_empty(&mddev->disks) && 4204 mddev->raid_disks == 0 && 4205 mddev->dev_sectors == 0) 4206 st = clear; 4207 else 4208 st = inactive; 4209 } 4210 return sprintf(page, "%s\n", array_states[st]); 4211 } 4212 4213 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 4214 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 4215 static int do_md_run(struct mddev *mddev); 4216 static int restart_array(struct mddev *mddev); 4217 4218 static ssize_t 4219 array_state_store(struct mddev *mddev, const char *buf, size_t len) 4220 { 4221 int err = 0; 4222 enum array_state st = match_word(buf, array_states); 4223 4224 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 4225 /* don't take reconfig_mutex when toggling between 4226 * clean and active 4227 */ 4228 spin_lock(&mddev->lock); 4229 if (st == active) { 4230 restart_array(mddev); 4231 
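			/* writers blocked in md_write_start() are waiting for
			 * MD_SB_CHANGE_PENDING to clear; clear it and wake them
			 * via sb_wait below */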
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4232 md_wakeup_thread(mddev->thread); 4233 wake_up(&mddev->sb_wait); 4234 } else /* st == clean */ { 4235 restart_array(mddev); 4236 if (!set_in_sync(mddev)) 4237 err = -EBUSY; 4238 } 4239 if (!err) 4240 sysfs_notify_dirent_safe(mddev->sysfs_state); 4241 spin_unlock(&mddev->lock); 4242 return err ?: len; 4243 } 4244 err = mddev_lock(mddev); 4245 if (err) 4246 return err; 4247 err = -EINVAL; 4248 switch(st) { 4249 case bad_word: 4250 break; 4251 case clear: 4252 /* stopping an active array */ 4253 err = do_md_stop(mddev, 0, NULL); 4254 break; 4255 case inactive: 4256 /* stopping an active array */ 4257 if (mddev->pers) 4258 err = do_md_stop(mddev, 2, NULL); 4259 else 4260 err = 0; /* already inactive */ 4261 break; 4262 case suspended: 4263 break; /* not supported yet */ 4264 case readonly: 4265 if (mddev->pers) 4266 err = md_set_readonly(mddev, NULL); 4267 else { 4268 mddev->ro = 1; 4269 set_disk_ro(mddev->gendisk, 1); 4270 err = do_md_run(mddev); 4271 } 4272 break; 4273 case read_auto: 4274 if (mddev->pers) { 4275 if (mddev->ro == 0) 4276 err = md_set_readonly(mddev, NULL); 4277 else if (mddev->ro == 1) 4278 err = restart_array(mddev); 4279 if (err == 0) { 4280 mddev->ro = 2; 4281 set_disk_ro(mddev->gendisk, 0); 4282 } 4283 } else { 4284 mddev->ro = 2; 4285 err = do_md_run(mddev); 4286 } 4287 break; 4288 case clean: 4289 if (mddev->pers) { 4290 err = restart_array(mddev); 4291 if (err) 4292 break; 4293 spin_lock(&mddev->lock); 4294 if (!set_in_sync(mddev)) 4295 err = -EBUSY; 4296 spin_unlock(&mddev->lock); 4297 } else 4298 err = -EINVAL; 4299 break; 4300 case active: 4301 if (mddev->pers) { 4302 err = restart_array(mddev); 4303 if (err) 4304 break; 4305 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4306 wake_up(&mddev->sb_wait); 4307 err = 0; 4308 } else { 4309 mddev->ro = 0; 4310 set_disk_ro(mddev->gendisk, 0); 4311 err = do_md_run(mddev); 4312 } 4313 break; 4314 case write_pending: 4315 case active_idle: 4316 /* these cannot be set */ 4317 break; 4318 } 4319 4320 if (!err) { 4321 if (mddev->hold_active == UNTIL_IOCTL) 4322 mddev->hold_active = 0; 4323 sysfs_notify_dirent_safe(mddev->sysfs_state); 4324 } 4325 mddev_unlock(mddev); 4326 return err ?: len; 4327 } 4328 static struct md_sysfs_entry md_array_state = 4329 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 4330 4331 static ssize_t 4332 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 4333 return sprintf(page, "%d\n", 4334 atomic_read(&mddev->max_corr_read_errors)); 4335 } 4336 4337 static ssize_t 4338 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 4339 { 4340 unsigned int n; 4341 int rv; 4342 4343 rv = kstrtouint(buf, 10, &n); 4344 if (rv < 0) 4345 return rv; 4346 atomic_set(&mddev->max_corr_read_errors, n); 4347 return len; 4348 } 4349 4350 static struct md_sysfs_entry max_corr_read_errors = 4351 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 4352 max_corrected_read_errors_store); 4353 4354 static ssize_t 4355 null_show(struct mddev *mddev, char *page) 4356 { 4357 return -EINVAL; 4358 } 4359 4360 static ssize_t 4361 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 4362 { 4363 /* buf must be %d:%d\n? giving major and minor numbers */ 4364 /* The new device is added to the array. 4365 * If the array has a persistent superblock, we read the 4366 * superblock to initialise info and check validity. 
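	 * (e.g. writing "8:16\n" requests the device with major 8, minor 16,
	 *  typically /dev/sdb.)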
4367 * Otherwise, only checking done is that in bind_rdev_to_array, 4368 * which mainly checks size. 4369 */ 4370 char *e; 4371 int major = simple_strtoul(buf, &e, 10); 4372 int minor; 4373 dev_t dev; 4374 struct md_rdev *rdev; 4375 int err; 4376 4377 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 4378 return -EINVAL; 4379 minor = simple_strtoul(e+1, &e, 10); 4380 if (*e && *e != '\n') 4381 return -EINVAL; 4382 dev = MKDEV(major, minor); 4383 if (major != MAJOR(dev) || 4384 minor != MINOR(dev)) 4385 return -EOVERFLOW; 4386 4387 flush_workqueue(md_misc_wq); 4388 4389 err = mddev_lock(mddev); 4390 if (err) 4391 return err; 4392 if (mddev->persistent) { 4393 rdev = md_import_device(dev, mddev->major_version, 4394 mddev->minor_version); 4395 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 4396 struct md_rdev *rdev0 4397 = list_entry(mddev->disks.next, 4398 struct md_rdev, same_set); 4399 err = super_types[mddev->major_version] 4400 .load_super(rdev, rdev0, mddev->minor_version); 4401 if (err < 0) 4402 goto out; 4403 } 4404 } else if (mddev->external) 4405 rdev = md_import_device(dev, -2, -1); 4406 else 4407 rdev = md_import_device(dev, -1, -1); 4408 4409 if (IS_ERR(rdev)) { 4410 mddev_unlock(mddev); 4411 return PTR_ERR(rdev); 4412 } 4413 err = bind_rdev_to_array(rdev, mddev); 4414 out: 4415 if (err) 4416 export_rdev(rdev); 4417 mddev_unlock(mddev); 4418 if (!err) 4419 md_new_event(mddev); 4420 return err ? err : len; 4421 } 4422 4423 static struct md_sysfs_entry md_new_device = 4424 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4425 4426 static ssize_t 4427 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4428 { 4429 char *end; 4430 unsigned long chunk, end_chunk; 4431 int err; 4432 4433 err = mddev_lock(mddev); 4434 if (err) 4435 return err; 4436 if (!mddev->bitmap) 4437 goto out; 4438 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4439 while (*buf) { 4440 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4441 if (buf == end) break; 4442 if (*end == '-') { /* range */ 4443 buf = end + 1; 4444 end_chunk = simple_strtoul(buf, &end, 0); 4445 if (buf == end) break; 4446 } 4447 if (*end && !isspace(*end)) break; 4448 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4449 buf = skip_spaces(end); 4450 } 4451 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4452 out: 4453 mddev_unlock(mddev); 4454 return len; 4455 } 4456 4457 static struct md_sysfs_entry md_bitmap = 4458 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4459 4460 static ssize_t 4461 size_show(struct mddev *mddev, char *page) 4462 { 4463 return sprintf(page, "%llu\n", 4464 (unsigned long long)mddev->dev_sectors / 2); 4465 } 4466 4467 static int update_size(struct mddev *mddev, sector_t num_sectors); 4468 4469 static ssize_t 4470 size_store(struct mddev *mddev, const char *buf, size_t len) 4471 { 4472 /* If array is inactive, we can reduce the component size, but 4473 * not increase it (except from 0). 4474 * If array is active, we can try an on-line resize 4475 */ 4476 sector_t sectors; 4477 int err = strict_blocks_to_sectors(buf, §ors); 4478 4479 if (err < 0) 4480 return err; 4481 err = mddev_lock(mddev); 4482 if (err) 4483 return err; 4484 if (mddev->pers) { 4485 err = update_size(mddev, sectors); 4486 if (err == 0) 4487 md_update_sb(mddev, 1); 4488 } else { 4489 if (mddev->dev_sectors == 0 || 4490 mddev->dev_sectors > sectors) 4491 mddev->dev_sectors = sectors; 4492 else 4493 err = -ENOSPC; 4494 } 4495 mddev_unlock(mddev); 4496 return err ? 
err : len; 4497 } 4498 4499 static struct md_sysfs_entry md_size = 4500 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 4501 4502 /* Metadata version. 4503 * This is one of 4504 * 'none' for arrays with no metadata (good luck...) 4505 * 'external' for arrays with externally managed metadata, 4506 * or N.M for internally known formats 4507 */ 4508 static ssize_t 4509 metadata_show(struct mddev *mddev, char *page) 4510 { 4511 if (mddev->persistent) 4512 return sprintf(page, "%d.%d\n", 4513 mddev->major_version, mddev->minor_version); 4514 else if (mddev->external) 4515 return sprintf(page, "external:%s\n", mddev->metadata_type); 4516 else 4517 return sprintf(page, "none\n"); 4518 } 4519 4520 static ssize_t 4521 metadata_store(struct mddev *mddev, const char *buf, size_t len) 4522 { 4523 int major, minor; 4524 char *e; 4525 int err; 4526 /* Changing the details of 'external' metadata is 4527 * always permitted. Otherwise there must be 4528 * no devices attached to the array. 4529 */ 4530 4531 err = mddev_lock(mddev); 4532 if (err) 4533 return err; 4534 err = -EBUSY; 4535 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4536 ; 4537 else if (!list_empty(&mddev->disks)) 4538 goto out_unlock; 4539 4540 err = 0; 4541 if (cmd_match(buf, "none")) { 4542 mddev->persistent = 0; 4543 mddev->external = 0; 4544 mddev->major_version = 0; 4545 mddev->minor_version = 90; 4546 goto out_unlock; 4547 } 4548 if (strncmp(buf, "external:", 9) == 0) { 4549 size_t namelen = len-9; 4550 if (namelen >= sizeof(mddev->metadata_type)) 4551 namelen = sizeof(mddev->metadata_type)-1; 4552 strncpy(mddev->metadata_type, buf+9, namelen); 4553 mddev->metadata_type[namelen] = 0; 4554 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4555 mddev->metadata_type[--namelen] = 0; 4556 mddev->persistent = 0; 4557 mddev->external = 1; 4558 mddev->major_version = 0; 4559 mddev->minor_version = 90; 4560 goto out_unlock; 4561 } 4562 major = simple_strtoul(buf, &e, 10); 4563 err = -EINVAL; 4564 if (e==buf || *e != '.') 4565 goto out_unlock; 4566 buf = e+1; 4567 minor = simple_strtoul(buf, &e, 10); 4568 if (e==buf || (*e && *e != '\n') ) 4569 goto out_unlock; 4570 err = -ENOENT; 4571 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4572 goto out_unlock; 4573 mddev->major_version = major; 4574 mddev->minor_version = minor; 4575 mddev->persistent = 1; 4576 mddev->external = 0; 4577 err = 0; 4578 out_unlock: 4579 mddev_unlock(mddev); 4580 return err ?: len; 4581 } 4582 4583 static struct md_sysfs_entry md_metadata = 4584 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4585 4586 static ssize_t 4587 action_show(struct mddev *mddev, char *page) 4588 { 4589 char *type = "idle"; 4590 unsigned long recovery = mddev->recovery; 4591 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 4592 type = "frozen"; 4593 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4594 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4595 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4596 type = "reshape"; 4597 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4598 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4599 type = "resync"; 4600 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4601 type = "check"; 4602 else 4603 type = "repair"; 4604 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4605 type = "recover"; 4606 else if (mddev->reshape_position != MaxSector) 4607 type = "reshape"; 4608 } 4609 return sprintf(page, "%s\n", type); 4610 } 4611 4612 static ssize_t 4613 
action_store(struct mddev *mddev, const char *page, size_t len) 4614 { 4615 if (!mddev->pers || !mddev->pers->sync_request) 4616 return -EINVAL; 4617 4618 4619 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4620 if (cmd_match(page, "frozen")) 4621 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4622 else 4623 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4624 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4625 mddev_lock(mddev) == 0) { 4626 flush_workqueue(md_misc_wq); 4627 if (mddev->sync_thread) { 4628 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4629 md_reap_sync_thread(mddev); 4630 } 4631 mddev_unlock(mddev); 4632 } 4633 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4634 return -EBUSY; 4635 else if (cmd_match(page, "resync")) 4636 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4637 else if (cmd_match(page, "recover")) { 4638 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4639 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4640 } else if (cmd_match(page, "reshape")) { 4641 int err; 4642 if (mddev->pers->start_reshape == NULL) 4643 return -EINVAL; 4644 err = mddev_lock(mddev); 4645 if (!err) { 4646 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4647 err = -EBUSY; 4648 else { 4649 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4650 err = mddev->pers->start_reshape(mddev); 4651 } 4652 mddev_unlock(mddev); 4653 } 4654 if (err) 4655 return err; 4656 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4657 } else { 4658 if (cmd_match(page, "check")) 4659 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4660 else if (!cmd_match(page, "repair")) 4661 return -EINVAL; 4662 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4663 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4664 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4665 } 4666 if (mddev->ro == 2) { 4667 /* A write to sync_action is enough to justify 4668 * canceling read-auto mode 4669 */ 4670 mddev->ro = 0; 4671 md_wakeup_thread(mddev->sync_thread); 4672 } 4673 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4674 md_wakeup_thread(mddev->thread); 4675 sysfs_notify_dirent_safe(mddev->sysfs_action); 4676 return len; 4677 } 4678 4679 static struct md_sysfs_entry md_scan_mode = 4680 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4681 4682 static ssize_t 4683 last_sync_action_show(struct mddev *mddev, char *page) 4684 { 4685 return sprintf(page, "%s\n", mddev->last_sync_action); 4686 } 4687 4688 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4689 4690 static ssize_t 4691 mismatch_cnt_show(struct mddev *mddev, char *page) 4692 { 4693 return sprintf(page, "%llu\n", 4694 (unsigned long long) 4695 atomic64_read(&mddev->resync_mismatches)); 4696 } 4697 4698 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4699 4700 static ssize_t 4701 sync_min_show(struct mddev *mddev, char *page) 4702 { 4703 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4704 mddev->sync_speed_min ? 
"local": "system"); 4705 } 4706 4707 static ssize_t 4708 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4709 { 4710 unsigned int min; 4711 int rv; 4712 4713 if (strncmp(buf, "system", 6)==0) { 4714 min = 0; 4715 } else { 4716 rv = kstrtouint(buf, 10, &min); 4717 if (rv < 0) 4718 return rv; 4719 if (min == 0) 4720 return -EINVAL; 4721 } 4722 mddev->sync_speed_min = min; 4723 return len; 4724 } 4725 4726 static struct md_sysfs_entry md_sync_min = 4727 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4728 4729 static ssize_t 4730 sync_max_show(struct mddev *mddev, char *page) 4731 { 4732 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4733 mddev->sync_speed_max ? "local": "system"); 4734 } 4735 4736 static ssize_t 4737 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4738 { 4739 unsigned int max; 4740 int rv; 4741 4742 if (strncmp(buf, "system", 6)==0) { 4743 max = 0; 4744 } else { 4745 rv = kstrtouint(buf, 10, &max); 4746 if (rv < 0) 4747 return rv; 4748 if (max == 0) 4749 return -EINVAL; 4750 } 4751 mddev->sync_speed_max = max; 4752 return len; 4753 } 4754 4755 static struct md_sysfs_entry md_sync_max = 4756 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4757 4758 static ssize_t 4759 degraded_show(struct mddev *mddev, char *page) 4760 { 4761 return sprintf(page, "%d\n", mddev->degraded); 4762 } 4763 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4764 4765 static ssize_t 4766 sync_force_parallel_show(struct mddev *mddev, char *page) 4767 { 4768 return sprintf(page, "%d\n", mddev->parallel_resync); 4769 } 4770 4771 static ssize_t 4772 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4773 { 4774 long n; 4775 4776 if (kstrtol(buf, 10, &n)) 4777 return -EINVAL; 4778 4779 if (n != 0 && n != 1) 4780 return -EINVAL; 4781 4782 mddev->parallel_resync = n; 4783 4784 if (mddev->sync_thread) 4785 wake_up(&resync_wait); 4786 4787 return len; 4788 } 4789 4790 /* force parallel resync, even with shared block devices */ 4791 static struct md_sysfs_entry md_sync_force_parallel = 4792 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4793 sync_force_parallel_show, sync_force_parallel_store); 4794 4795 static ssize_t 4796 sync_speed_show(struct mddev *mddev, char *page) 4797 { 4798 unsigned long resync, dt, db; 4799 if (mddev->curr_resync == 0) 4800 return sprintf(page, "none\n"); 4801 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4802 dt = (jiffies - mddev->resync_mark) / HZ; 4803 if (!dt) dt++; 4804 db = resync - mddev->resync_mark_cnt; 4805 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4806 } 4807 4808 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4809 4810 static ssize_t 4811 sync_completed_show(struct mddev *mddev, char *page) 4812 { 4813 unsigned long long max_sectors, resync; 4814 4815 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4816 return sprintf(page, "none\n"); 4817 4818 if (mddev->curr_resync == 1 || 4819 mddev->curr_resync == 2) 4820 return sprintf(page, "delayed\n"); 4821 4822 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4823 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4824 max_sectors = mddev->resync_max_sectors; 4825 else 4826 max_sectors = mddev->dev_sectors; 4827 4828 resync = mddev->curr_resync_completed; 4829 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4830 } 4831 4832 static struct md_sysfs_entry md_sync_completed = 4833 __ATTR_PREALLOC(sync_completed, S_IRUGO, 
sync_completed_show, NULL); 4834 4835 static ssize_t 4836 min_sync_show(struct mddev *mddev, char *page) 4837 { 4838 return sprintf(page, "%llu\n", 4839 (unsigned long long)mddev->resync_min); 4840 } 4841 static ssize_t 4842 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4843 { 4844 unsigned long long min; 4845 int err; 4846 4847 if (kstrtoull(buf, 10, &min)) 4848 return -EINVAL; 4849 4850 spin_lock(&mddev->lock); 4851 err = -EINVAL; 4852 if (min > mddev->resync_max) 4853 goto out_unlock; 4854 4855 err = -EBUSY; 4856 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4857 goto out_unlock; 4858 4859 /* Round down to multiple of 4K for safety */ 4860 mddev->resync_min = round_down(min, 8); 4861 err = 0; 4862 4863 out_unlock: 4864 spin_unlock(&mddev->lock); 4865 return err ?: len; 4866 } 4867 4868 static struct md_sysfs_entry md_min_sync = 4869 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4870 4871 static ssize_t 4872 max_sync_show(struct mddev *mddev, char *page) 4873 { 4874 if (mddev->resync_max == MaxSector) 4875 return sprintf(page, "max\n"); 4876 else 4877 return sprintf(page, "%llu\n", 4878 (unsigned long long)mddev->resync_max); 4879 } 4880 static ssize_t 4881 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4882 { 4883 int err; 4884 spin_lock(&mddev->lock); 4885 if (strncmp(buf, "max", 3) == 0) 4886 mddev->resync_max = MaxSector; 4887 else { 4888 unsigned long long max; 4889 int chunk; 4890 4891 err = -EINVAL; 4892 if (kstrtoull(buf, 10, &max)) 4893 goto out_unlock; 4894 if (max < mddev->resync_min) 4895 goto out_unlock; 4896 4897 err = -EBUSY; 4898 if (max < mddev->resync_max && 4899 mddev->ro == 0 && 4900 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4901 goto out_unlock; 4902 4903 /* Must be a multiple of chunk_size */ 4904 chunk = mddev->chunk_sectors; 4905 if (chunk) { 4906 sector_t temp = max; 4907 4908 err = -EINVAL; 4909 if (sector_div(temp, chunk)) 4910 goto out_unlock; 4911 } 4912 mddev->resync_max = max; 4913 } 4914 wake_up(&mddev->recovery_wait); 4915 err = 0; 4916 out_unlock: 4917 spin_unlock(&mddev->lock); 4918 return err ?: len; 4919 } 4920 4921 static struct md_sysfs_entry md_max_sync = 4922 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4923 4924 static ssize_t 4925 suspend_lo_show(struct mddev *mddev, char *page) 4926 { 4927 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4928 } 4929 4930 static ssize_t 4931 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4932 { 4933 unsigned long long new; 4934 int err; 4935 4936 err = kstrtoull(buf, 10, &new); 4937 if (err < 0) 4938 return err; 4939 if (new != (sector_t)new) 4940 return -EINVAL; 4941 4942 err = mddev_lock(mddev); 4943 if (err) 4944 return err; 4945 err = -EINVAL; 4946 if (mddev->pers == NULL || 4947 mddev->pers->quiesce == NULL) 4948 goto unlock; 4949 mddev_suspend(mddev); 4950 mddev->suspend_lo = new; 4951 mddev_resume(mddev); 4952 4953 err = 0; 4954 unlock: 4955 mddev_unlock(mddev); 4956 return err ?: len; 4957 } 4958 static struct md_sysfs_entry md_suspend_lo = 4959 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4960 4961 static ssize_t 4962 suspend_hi_show(struct mddev *mddev, char *page) 4963 { 4964 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4965 } 4966 4967 static ssize_t 4968 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 4969 { 4970 unsigned long long new; 4971 int err; 4972 4973 err = kstrtoull(buf, 10, &new); 4974 
if (err < 0) 4975 return err; 4976 if (new != (sector_t)new) 4977 return -EINVAL; 4978 4979 err = mddev_lock(mddev); 4980 if (err) 4981 return err; 4982 err = -EINVAL; 4983 if (mddev->pers == NULL) 4984 goto unlock; 4985 4986 mddev_suspend(mddev); 4987 mddev->suspend_hi = new; 4988 mddev_resume(mddev); 4989 4990 err = 0; 4991 unlock: 4992 mddev_unlock(mddev); 4993 return err ?: len; 4994 } 4995 static struct md_sysfs_entry md_suspend_hi = 4996 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4997 4998 static ssize_t 4999 reshape_position_show(struct mddev *mddev, char *page) 5000 { 5001 if (mddev->reshape_position != MaxSector) 5002 return sprintf(page, "%llu\n", 5003 (unsigned long long)mddev->reshape_position); 5004 strcpy(page, "none\n"); 5005 return 5; 5006 } 5007 5008 static ssize_t 5009 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 5010 { 5011 struct md_rdev *rdev; 5012 unsigned long long new; 5013 int err; 5014 5015 err = kstrtoull(buf, 10, &new); 5016 if (err < 0) 5017 return err; 5018 if (new != (sector_t)new) 5019 return -EINVAL; 5020 err = mddev_lock(mddev); 5021 if (err) 5022 return err; 5023 err = -EBUSY; 5024 if (mddev->pers) 5025 goto unlock; 5026 mddev->reshape_position = new; 5027 mddev->delta_disks = 0; 5028 mddev->reshape_backwards = 0; 5029 mddev->new_level = mddev->level; 5030 mddev->new_layout = mddev->layout; 5031 mddev->new_chunk_sectors = mddev->chunk_sectors; 5032 rdev_for_each(rdev, mddev) 5033 rdev->new_data_offset = rdev->data_offset; 5034 err = 0; 5035 unlock: 5036 mddev_unlock(mddev); 5037 return err ?: len; 5038 } 5039 5040 static struct md_sysfs_entry md_reshape_position = 5041 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 5042 reshape_position_store); 5043 5044 static ssize_t 5045 reshape_direction_show(struct mddev *mddev, char *page) 5046 { 5047 return sprintf(page, "%s\n", 5048 mddev->reshape_backwards ? 
"backwards" : "forwards"); 5049 } 5050 5051 static ssize_t 5052 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 5053 { 5054 int backwards = 0; 5055 int err; 5056 5057 if (cmd_match(buf, "forwards")) 5058 backwards = 0; 5059 else if (cmd_match(buf, "backwards")) 5060 backwards = 1; 5061 else 5062 return -EINVAL; 5063 if (mddev->reshape_backwards == backwards) 5064 return len; 5065 5066 err = mddev_lock(mddev); 5067 if (err) 5068 return err; 5069 /* check if we are allowed to change */ 5070 if (mddev->delta_disks) 5071 err = -EBUSY; 5072 else if (mddev->persistent && 5073 mddev->major_version == 0) 5074 err = -EINVAL; 5075 else 5076 mddev->reshape_backwards = backwards; 5077 mddev_unlock(mddev); 5078 return err ?: len; 5079 } 5080 5081 static struct md_sysfs_entry md_reshape_direction = 5082 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 5083 reshape_direction_store); 5084 5085 static ssize_t 5086 array_size_show(struct mddev *mddev, char *page) 5087 { 5088 if (mddev->external_size) 5089 return sprintf(page, "%llu\n", 5090 (unsigned long long)mddev->array_sectors/2); 5091 else 5092 return sprintf(page, "default\n"); 5093 } 5094 5095 static ssize_t 5096 array_size_store(struct mddev *mddev, const char *buf, size_t len) 5097 { 5098 sector_t sectors; 5099 int err; 5100 5101 err = mddev_lock(mddev); 5102 if (err) 5103 return err; 5104 5105 /* cluster raid doesn't support change array_sectors */ 5106 if (mddev_is_clustered(mddev)) { 5107 mddev_unlock(mddev); 5108 return -EINVAL; 5109 } 5110 5111 if (strncmp(buf, "default", 7) == 0) { 5112 if (mddev->pers) 5113 sectors = mddev->pers->size(mddev, 0, 0); 5114 else 5115 sectors = mddev->array_sectors; 5116 5117 mddev->external_size = 0; 5118 } else { 5119 if (strict_blocks_to_sectors(buf, §ors) < 0) 5120 err = -EINVAL; 5121 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 5122 err = -E2BIG; 5123 else 5124 mddev->external_size = 1; 5125 } 5126 5127 if (!err) { 5128 mddev->array_sectors = sectors; 5129 if (mddev->pers) { 5130 set_capacity(mddev->gendisk, mddev->array_sectors); 5131 revalidate_disk(mddev->gendisk); 5132 } 5133 } 5134 mddev_unlock(mddev); 5135 return err ?: len; 5136 } 5137 5138 static struct md_sysfs_entry md_array_size = 5139 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 5140 array_size_store); 5141 5142 static ssize_t 5143 consistency_policy_show(struct mddev *mddev, char *page) 5144 { 5145 int ret; 5146 5147 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 5148 ret = sprintf(page, "journal\n"); 5149 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { 5150 ret = sprintf(page, "ppl\n"); 5151 } else if (mddev->bitmap) { 5152 ret = sprintf(page, "bitmap\n"); 5153 } else if (mddev->pers) { 5154 if (mddev->pers->sync_request) 5155 ret = sprintf(page, "resync\n"); 5156 else 5157 ret = sprintf(page, "none\n"); 5158 } else { 5159 ret = sprintf(page, "unknown\n"); 5160 } 5161 5162 return ret; 5163 } 5164 5165 static ssize_t 5166 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) 5167 { 5168 int err = 0; 5169 5170 if (mddev->pers) { 5171 if (mddev->pers->change_consistency_policy) 5172 err = mddev->pers->change_consistency_policy(mddev, buf); 5173 else 5174 err = -EBUSY; 5175 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { 5176 set_bit(MD_HAS_PPL, &mddev->flags); 5177 } else { 5178 err = -EINVAL; 5179 } 5180 5181 return err ? 
err : len; 5182 } 5183 5184 static struct md_sysfs_entry md_consistency_policy = 5185 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show, 5186 consistency_policy_store); 5187 5188 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) 5189 { 5190 return sprintf(page, "%d\n", mddev->fail_last_dev); 5191 } 5192 5193 /* 5194 * Setting fail_last_dev to true to allow last device to be forcibly removed 5195 * from RAID1/RAID10. 5196 */ 5197 static ssize_t 5198 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) 5199 { 5200 int ret; 5201 bool value; 5202 5203 ret = kstrtobool(buf, &value); 5204 if (ret) 5205 return ret; 5206 5207 if (value != mddev->fail_last_dev) 5208 mddev->fail_last_dev = value; 5209 5210 return len; 5211 } 5212 static struct md_sysfs_entry md_fail_last_dev = 5213 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, 5214 fail_last_dev_store); 5215 5216 static struct attribute *md_default_attrs[] = { 5217 &md_level.attr, 5218 &md_layout.attr, 5219 &md_raid_disks.attr, 5220 &md_chunk_size.attr, 5221 &md_size.attr, 5222 &md_resync_start.attr, 5223 &md_metadata.attr, 5224 &md_new_device.attr, 5225 &md_safe_delay.attr, 5226 &md_array_state.attr, 5227 &md_reshape_position.attr, 5228 &md_reshape_direction.attr, 5229 &md_array_size.attr, 5230 &max_corr_read_errors.attr, 5231 &md_consistency_policy.attr, 5232 &md_fail_last_dev.attr, 5233 NULL, 5234 }; 5235 5236 static struct attribute *md_redundancy_attrs[] = { 5237 &md_scan_mode.attr, 5238 &md_last_scan_mode.attr, 5239 &md_mismatches.attr, 5240 &md_sync_min.attr, 5241 &md_sync_max.attr, 5242 &md_sync_speed.attr, 5243 &md_sync_force_parallel.attr, 5244 &md_sync_completed.attr, 5245 &md_min_sync.attr, 5246 &md_max_sync.attr, 5247 &md_suspend_lo.attr, 5248 &md_suspend_hi.attr, 5249 &md_bitmap.attr, 5250 &md_degraded.attr, 5251 NULL, 5252 }; 5253 static struct attribute_group md_redundancy_group = { 5254 .name = NULL, 5255 .attrs = md_redundancy_attrs, 5256 }; 5257 5258 static ssize_t 5259 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 5260 { 5261 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5262 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5263 ssize_t rv; 5264 5265 if (!entry->show) 5266 return -EIO; 5267 spin_lock(&all_mddevs_lock); 5268 if (list_empty(&mddev->all_mddevs)) { 5269 spin_unlock(&all_mddevs_lock); 5270 return -EBUSY; 5271 } 5272 mddev_get(mddev); 5273 spin_unlock(&all_mddevs_lock); 5274 5275 rv = entry->show(mddev, page); 5276 mddev_put(mddev); 5277 return rv; 5278 } 5279 5280 static ssize_t 5281 md_attr_store(struct kobject *kobj, struct attribute *attr, 5282 const char *page, size_t length) 5283 { 5284 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5285 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5286 ssize_t rv; 5287 5288 if (!entry->store) 5289 return -EIO; 5290 if (!capable(CAP_SYS_ADMIN)) 5291 return -EACCES; 5292 spin_lock(&all_mddevs_lock); 5293 if (list_empty(&mddev->all_mddevs)) { 5294 spin_unlock(&all_mddevs_lock); 5295 return -EBUSY; 5296 } 5297 mddev_get(mddev); 5298 spin_unlock(&all_mddevs_lock); 5299 rv = entry->store(mddev, page, length); 5300 mddev_put(mddev); 5301 return rv; 5302 } 5303 5304 static void md_free(struct kobject *ko) 5305 { 5306 struct mddev *mddev = container_of(ko, struct mddev, kobj); 5307 5308 if (mddev->sysfs_state) 5309 sysfs_put(mddev->sysfs_state); 5310 5311 if (mddev->gendisk) 5312 
del_gendisk(mddev->gendisk); 5313 if (mddev->queue) 5314 blk_cleanup_queue(mddev->queue); 5315 if (mddev->gendisk) 5316 put_disk(mddev->gendisk); 5317 percpu_ref_exit(&mddev->writes_pending); 5318 5319 bioset_exit(&mddev->bio_set); 5320 bioset_exit(&mddev->sync_set); 5321 kfree(mddev); 5322 } 5323 5324 static const struct sysfs_ops md_sysfs_ops = { 5325 .show = md_attr_show, 5326 .store = md_attr_store, 5327 }; 5328 static struct kobj_type md_ktype = { 5329 .release = md_free, 5330 .sysfs_ops = &md_sysfs_ops, 5331 .default_attrs = md_default_attrs, 5332 }; 5333 5334 int mdp_major = 0; 5335 5336 static void mddev_delayed_delete(struct work_struct *ws) 5337 { 5338 struct mddev *mddev = container_of(ws, struct mddev, del_work); 5339 5340 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 5341 kobject_del(&mddev->kobj); 5342 kobject_put(&mddev->kobj); 5343 } 5344 5345 static void no_op(struct percpu_ref *r) {} 5346 5347 int mddev_init_writes_pending(struct mddev *mddev) 5348 { 5349 if (mddev->writes_pending.percpu_count_ptr) 5350 return 0; 5351 if (percpu_ref_init(&mddev->writes_pending, no_op, 5352 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0) 5353 return -ENOMEM; 5354 /* We want to start with the refcount at zero */ 5355 percpu_ref_put(&mddev->writes_pending); 5356 return 0; 5357 } 5358 EXPORT_SYMBOL_GPL(mddev_init_writes_pending); 5359 5360 static int md_alloc(dev_t dev, char *name) 5361 { 5362 /* 5363 * If dev is zero, name is the name of a device to allocate with 5364 * an arbitrary minor number. It will be "md_???" 5365 * If dev is non-zero it must be a device number with a MAJOR of 5366 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then 5367 * the device is being created by opening a node in /dev. 5368 * If "name" is not NULL, the device is being created by 5369 * writing to /sys/module/md_mod/parameters/new_array. 5370 */ 5371 static DEFINE_MUTEX(disks_mutex); 5372 struct mddev *mddev = mddev_find(dev); 5373 struct gendisk *disk; 5374 int partitioned; 5375 int shift; 5376 int unit; 5377 int error; 5378 5379 if (!mddev) 5380 return -ENODEV; 5381 5382 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 5383 shift = partitioned ? MdpMinorShift : 0; 5384 unit = MINOR(mddev->unit) >> shift; 5385 5386 /* wait for any previous instance of this device to be 5387 * completely removed (mddev_delayed_delete). 5388 */ 5389 flush_workqueue(md_misc_wq); 5390 5391 mutex_lock(&disks_mutex); 5392 error = -EEXIST; 5393 if (mddev->gendisk) 5394 goto abort; 5395 5396 if (name && !dev) { 5397 /* Need to ensure that 'name' is not a duplicate. 5398 */ 5399 struct mddev *mddev2; 5400 spin_lock(&all_mddevs_lock); 5401 5402 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 5403 if (mddev2->gendisk && 5404 strcmp(mddev2->gendisk->disk_name, name) == 0) { 5405 spin_unlock(&all_mddevs_lock); 5406 goto abort; 5407 } 5408 spin_unlock(&all_mddevs_lock); 5409 } 5410 if (name && dev) 5411 /* 5412 * Creating /dev/mdNNN via "newarray", so adjust hold_active. 
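	 * (UNTIL_STOP keeps the mddev allocated until the array is explicitly
	 * stopped, rather than letting it go away when the last opener closes.)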
5413 */ 5414 mddev->hold_active = UNTIL_STOP; 5415 5416 error = -ENOMEM; 5417 mddev->queue = blk_alloc_queue(GFP_KERNEL); 5418 if (!mddev->queue) 5419 goto abort; 5420 mddev->queue->queuedata = mddev; 5421 5422 blk_queue_make_request(mddev->queue, md_make_request); 5423 blk_set_stacking_limits(&mddev->queue->limits); 5424 5425 disk = alloc_disk(1 << shift); 5426 if (!disk) { 5427 blk_cleanup_queue(mddev->queue); 5428 mddev->queue = NULL; 5429 goto abort; 5430 } 5431 disk->major = MAJOR(mddev->unit); 5432 disk->first_minor = unit << shift; 5433 if (name) 5434 strcpy(disk->disk_name, name); 5435 else if (partitioned) 5436 sprintf(disk->disk_name, "md_d%d", unit); 5437 else 5438 sprintf(disk->disk_name, "md%d", unit); 5439 disk->fops = &md_fops; 5440 disk->private_data = mddev; 5441 disk->queue = mddev->queue; 5442 blk_queue_write_cache(mddev->queue, true, true); 5443 /* Allow extended partitions. This makes the 5444 * 'mdp' device redundant, but we can't really 5445 * remove it now. 5446 */ 5447 disk->flags |= GENHD_FL_EXT_DEVT; 5448 mddev->gendisk = disk; 5449 /* As soon as we call add_disk(), another thread could get 5450 * through to md_open, so make sure it doesn't get too far 5451 */ 5452 mutex_lock(&mddev->open_mutex); 5453 add_disk(disk); 5454 5455 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); 5456 if (error) { 5457 /* This isn't possible, but as kobject_init_and_add is marked 5458 * __must_check, we must do something with the result 5459 */ 5460 pr_debug("md: cannot register %s/md - name in use\n", 5461 disk->disk_name); 5462 error = 0; 5463 } 5464 if (mddev->kobj.sd && 5465 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 5466 pr_debug("pointless warning\n"); 5467 mutex_unlock(&mddev->open_mutex); 5468 abort: 5469 mutex_unlock(&disks_mutex); 5470 if (!error && mddev->kobj.sd) { 5471 kobject_uevent(&mddev->kobj, KOBJ_ADD); 5472 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 5473 } 5474 mddev_put(mddev); 5475 return error; 5476 } 5477 5478 static struct kobject *md_probe(dev_t dev, int *part, void *data) 5479 { 5480 if (create_on_open) 5481 md_alloc(dev, NULL); 5482 return NULL; 5483 } 5484 5485 static int add_named_array(const char *val, const struct kernel_param *kp) 5486 { 5487 /* 5488 * val must be "md_*" or "mdNNN". 5489 * For "md_*" we allocate an array with a large free minor number, and 5490 * set the name to val. val must not already be an active name. 5491 * For "mdNNN" we allocate an array with the minor number NNN 5492 * which must not already be in use. 
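 * For example (paths as typically exposed by the md_mod module):
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 * creates an array named /dev/md_home, while "echo md127" at the same
 * path creates /dev/md127.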
5493 */ 5494 int len = strlen(val); 5495 char buf[DISK_NAME_LEN]; 5496 unsigned long devnum; 5497 5498 while (len && val[len-1] == '\n') 5499 len--; 5500 if (len >= DISK_NAME_LEN) 5501 return -E2BIG; 5502 strlcpy(buf, val, len+1); 5503 if (strncmp(buf, "md_", 3) == 0) 5504 return md_alloc(0, buf); 5505 if (strncmp(buf, "md", 2) == 0 && 5506 isdigit(buf[2]) && 5507 kstrtoul(buf+2, 10, &devnum) == 0 && 5508 devnum <= MINORMASK) 5509 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL); 5510 5511 return -EINVAL; 5512 } 5513 5514 static void md_safemode_timeout(struct timer_list *t) 5515 { 5516 struct mddev *mddev = from_timer(mddev, t, safemode_timer); 5517 5518 mddev->safemode = 1; 5519 if (mddev->external) 5520 sysfs_notify_dirent_safe(mddev->sysfs_state); 5521 5522 md_wakeup_thread(mddev->thread); 5523 } 5524 5525 static int start_dirty_degraded; 5526 5527 int md_run(struct mddev *mddev) 5528 { 5529 int err; 5530 struct md_rdev *rdev; 5531 struct md_personality *pers; 5532 5533 if (list_empty(&mddev->disks)) 5534 /* cannot run an array with no devices.. */ 5535 return -EINVAL; 5536 5537 if (mddev->pers) 5538 return -EBUSY; 5539 /* Cannot run until previous stop completes properly */ 5540 if (mddev->sysfs_active) 5541 return -EBUSY; 5542 5543 /* 5544 * Analyze all RAID superblock(s) 5545 */ 5546 if (!mddev->raid_disks) { 5547 if (!mddev->persistent) 5548 return -EINVAL; 5549 analyze_sbs(mddev); 5550 } 5551 5552 if (mddev->level != LEVEL_NONE) 5553 request_module("md-level-%d", mddev->level); 5554 else if (mddev->clevel[0]) 5555 request_module("md-%s", mddev->clevel); 5556 5557 /* 5558 * Drop all container device buffers, from now on 5559 * the only valid external interface is through the md 5560 * device. 5561 */ 5562 mddev->has_superblocks = false; 5563 rdev_for_each(rdev, mddev) { 5564 if (test_bit(Faulty, &rdev->flags)) 5565 continue; 5566 sync_blockdev(rdev->bdev); 5567 invalidate_bdev(rdev->bdev); 5568 if (mddev->ro != 1 && 5569 (bdev_read_only(rdev->bdev) || 5570 bdev_read_only(rdev->meta_bdev))) { 5571 mddev->ro = 1; 5572 if (mddev->gendisk) 5573 set_disk_ro(mddev->gendisk, 1); 5574 } 5575 5576 if (rdev->sb_page) 5577 mddev->has_superblocks = true; 5578 5579 /* perform some consistency tests on the device. 5580 * We don't want the data to overlap the metadata, 5581 * Internal Bitmap issues have been handled elsewhere. 
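	 * (1.1/1.2 metadata puts the superblock before the data while 0.90
	 * and 1.0 put it at the end of the device; both layouts are checked
	 * against data_offset/sb_start below.)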
5582 */ 5583 if (rdev->meta_bdev) { 5584 /* Nothing to check */; 5585 } else if (rdev->data_offset < rdev->sb_start) { 5586 if (mddev->dev_sectors && 5587 rdev->data_offset + mddev->dev_sectors 5588 > rdev->sb_start) { 5589 pr_warn("md: %s: data overlaps metadata\n", 5590 mdname(mddev)); 5591 return -EINVAL; 5592 } 5593 } else { 5594 if (rdev->sb_start + rdev->sb_size/512 5595 > rdev->data_offset) { 5596 pr_warn("md: %s: metadata overlaps data\n", 5597 mdname(mddev)); 5598 return -EINVAL; 5599 } 5600 } 5601 sysfs_notify_dirent_safe(rdev->sysfs_state); 5602 } 5603 5604 if (!bioset_initialized(&mddev->bio_set)) { 5605 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5606 if (err) 5607 return err; 5608 } 5609 if (!bioset_initialized(&mddev->sync_set)) { 5610 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5611 if (err) 5612 return err; 5613 } 5614 5615 spin_lock(&pers_lock); 5616 pers = find_pers(mddev->level, mddev->clevel); 5617 if (!pers || !try_module_get(pers->owner)) { 5618 spin_unlock(&pers_lock); 5619 if (mddev->level != LEVEL_NONE) 5620 pr_warn("md: personality for level %d is not loaded!\n", 5621 mddev->level); 5622 else 5623 pr_warn("md: personality for level %s is not loaded!\n", 5624 mddev->clevel); 5625 err = -EINVAL; 5626 goto abort; 5627 } 5628 spin_unlock(&pers_lock); 5629 if (mddev->level != pers->level) { 5630 mddev->level = pers->level; 5631 mddev->new_level = pers->level; 5632 } 5633 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5634 5635 if (mddev->reshape_position != MaxSector && 5636 pers->start_reshape == NULL) { 5637 /* This personality cannot handle reshaping... */ 5638 module_put(pers->owner); 5639 err = -EINVAL; 5640 goto abort; 5641 } 5642 5643 if (pers->sync_request) { 5644 /* Warn if this is a potentially silly 5645 * configuration. 
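	 * (e.g. two raid1 members that are partitions of the same physical
	 * disk: the mirror would not survive that one disk failing.)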
5646 */ 5647 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5648 struct md_rdev *rdev2; 5649 int warned = 0; 5650 5651 rdev_for_each(rdev, mddev) 5652 rdev_for_each(rdev2, mddev) { 5653 if (rdev < rdev2 && 5654 rdev->bdev->bd_contains == 5655 rdev2->bdev->bd_contains) { 5656 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n", 5657 mdname(mddev), 5658 bdevname(rdev->bdev,b), 5659 bdevname(rdev2->bdev,b2)); 5660 warned = 1; 5661 } 5662 } 5663 5664 if (warned) 5665 pr_warn("True protection against single-disk failure might be compromised.\n"); 5666 } 5667 5668 mddev->recovery = 0; 5669 /* may be over-ridden by personality */ 5670 mddev->resync_max_sectors = mddev->dev_sectors; 5671 5672 mddev->ok_start_degraded = start_dirty_degraded; 5673 5674 if (start_readonly && mddev->ro == 0) 5675 mddev->ro = 2; /* read-only, but switch on first write */ 5676 5677 err = pers->run(mddev); 5678 if (err) 5679 pr_warn("md: pers->run() failed ...\n"); 5680 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5681 WARN_ONCE(!mddev->external_size, 5682 "%s: default size too small, but 'external_size' not in effect?\n", 5683 __func__); 5684 pr_warn("md: invalid array_size %llu > default size %llu\n", 5685 (unsigned long long)mddev->array_sectors / 2, 5686 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5687 err = -EINVAL; 5688 } 5689 if (err == 0 && pers->sync_request && 5690 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5691 struct bitmap *bitmap; 5692 5693 bitmap = md_bitmap_create(mddev, -1); 5694 if (IS_ERR(bitmap)) { 5695 err = PTR_ERR(bitmap); 5696 pr_warn("%s: failed to create bitmap (%d)\n", 5697 mdname(mddev), err); 5698 } else 5699 mddev->bitmap = bitmap; 5700 5701 } 5702 if (err) 5703 goto bitmap_abort; 5704 5705 if (mddev->bitmap_info.max_write_behind > 0) { 5706 bool creat_pool = false; 5707 5708 rdev_for_each(rdev, mddev) { 5709 if (test_bit(WriteMostly, &rdev->flags) && 5710 rdev_init_wb(rdev)) 5711 creat_pool = true; 5712 } 5713 if (creat_pool && mddev->wb_info_pool == NULL) { 5714 mddev->wb_info_pool = 5715 mempool_create_kmalloc_pool(NR_WB_INFOS, 5716 sizeof(struct wb_info)); 5717 if (!mddev->wb_info_pool) { 5718 err = -ENOMEM; 5719 goto bitmap_abort; 5720 } 5721 } 5722 } 5723 5724 if (mddev->queue) { 5725 bool nonrot = true; 5726 5727 rdev_for_each(rdev, mddev) { 5728 if (rdev->raid_disk >= 0 && 5729 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { 5730 nonrot = false; 5731 break; 5732 } 5733 } 5734 if (mddev->degraded) 5735 nonrot = false; 5736 if (nonrot) 5737 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); 5738 else 5739 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); 5740 mddev->queue->backing_dev_info->congested_data = mddev; 5741 mddev->queue->backing_dev_info->congested_fn = md_congested; 5742 } 5743 if (pers->sync_request) { 5744 if (mddev->kobj.sd && 5745 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 5746 pr_warn("md: cannot register extra attributes for %s\n", 5747 mdname(mddev)); 5748 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 5749 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 5750 mddev->ro = 0; 5751 5752 atomic_set(&mddev->max_corr_read_errors, 5753 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 5754 mddev->safemode = 0; 5755 if (mddev_is_clustered(mddev)) 5756 mddev->safemode_delay = 0; 5757 else 5758 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 5759 mddev->in_sync = 1; 5760 smp_wmb(); 5761 spin_lock(&mddev->lock); 5762 mddev->pers = pers; 5763 
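	/* ->pers is published under mddev->lock; sysfs readers that take the
	 * same lock (e.g. level_show()) now see the array as running */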
spin_unlock(&mddev->lock); 5764 rdev_for_each(rdev, mddev) 5765 if (rdev->raid_disk >= 0) 5766 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ 5767 5768 if (mddev->degraded && !mddev->ro) 5769 /* This ensures that recovering status is reported immediately 5770 * via sysfs - until a lack of spares is confirmed. 5771 */ 5772 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5773 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5774 5775 if (mddev->sb_flags) 5776 md_update_sb(mddev, 0); 5777 5778 md_new_event(mddev); 5779 sysfs_notify_dirent_safe(mddev->sysfs_state); 5780 sysfs_notify_dirent_safe(mddev->sysfs_action); 5781 sysfs_notify(&mddev->kobj, NULL, "degraded"); 5782 return 0; 5783 5784 bitmap_abort: 5785 mddev_detach(mddev); 5786 if (mddev->private) 5787 pers->free(mddev, mddev->private); 5788 mddev->private = NULL; 5789 module_put(pers->owner); 5790 md_bitmap_destroy(mddev); 5791 abort: 5792 bioset_exit(&mddev->bio_set); 5793 bioset_exit(&mddev->sync_set); 5794 return err; 5795 } 5796 EXPORT_SYMBOL_GPL(md_run); 5797 5798 static int do_md_run(struct mddev *mddev) 5799 { 5800 int err; 5801 5802 err = md_run(mddev); 5803 if (err) 5804 goto out; 5805 err = md_bitmap_load(mddev); 5806 if (err) { 5807 md_bitmap_destroy(mddev); 5808 goto out; 5809 } 5810 5811 if (mddev_is_clustered(mddev)) 5812 md_allow_write(mddev); 5813 5814 /* run start up tasks that require md_thread */ 5815 md_start(mddev); 5816 5817 md_wakeup_thread(mddev->thread); 5818 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 5819 5820 set_capacity(mddev->gendisk, mddev->array_sectors); 5821 revalidate_disk(mddev->gendisk); 5822 mddev->changed = 1; 5823 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5824 out: 5825 return err; 5826 } 5827 5828 int md_start(struct mddev *mddev) 5829 { 5830 int ret = 0; 5831 5832 if (mddev->pers->start) { 5833 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 5834 md_wakeup_thread(mddev->thread); 5835 ret = mddev->pers->start(mddev); 5836 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 5837 md_wakeup_thread(mddev->sync_thread); 5838 } 5839 return ret; 5840 } 5841 EXPORT_SYMBOL_GPL(md_start); 5842 5843 static int restart_array(struct mddev *mddev) 5844 { 5845 struct gendisk *disk = mddev->gendisk; 5846 struct md_rdev *rdev; 5847 bool has_journal = false; 5848 bool has_readonly = false; 5849 5850 /* Complain if it has no devices */ 5851 if (list_empty(&mddev->disks)) 5852 return -ENXIO; 5853 if (!mddev->pers) 5854 return -EINVAL; 5855 if (!mddev->ro) 5856 return -EBUSY; 5857 5858 rcu_read_lock(); 5859 rdev_for_each_rcu(rdev, mddev) { 5860 if (test_bit(Journal, &rdev->flags) && 5861 !test_bit(Faulty, &rdev->flags)) 5862 has_journal = true; 5863 if (bdev_read_only(rdev->bdev)) 5864 has_readonly = true; 5865 } 5866 rcu_read_unlock(); 5867 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) 5868 /* Don't restart rw with journal missing/faulty */ 5869 return -EINVAL; 5870 if (has_readonly) 5871 return -EROFS; 5872 5873 mddev->safemode = 0; 5874 mddev->ro = 0; 5875 set_disk_ro(disk, 0); 5876 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 5877 /* Kick recovery or resync if necessary */ 5878 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5879 md_wakeup_thread(mddev->thread); 5880 md_wakeup_thread(mddev->sync_thread); 5881 sysfs_notify_dirent_safe(mddev->sysfs_state); 5882 return 0; 5883 } 5884 5885 static void md_clean(struct mddev *mddev) 5886 { 5887 mddev->array_sectors = 0; 5888 mddev->external_size = 0; 5889 mddev->dev_sectors = 0; 5890 
mddev->raid_disks = 0; 5891 mddev->recovery_cp = 0; 5892 mddev->resync_min = 0; 5893 mddev->resync_max = MaxSector; 5894 mddev->reshape_position = MaxSector; 5895 mddev->external = 0; 5896 mddev->persistent = 0; 5897 mddev->level = LEVEL_NONE; 5898 mddev->clevel[0] = 0; 5899 mddev->flags = 0; 5900 mddev->sb_flags = 0; 5901 mddev->ro = 0; 5902 mddev->metadata_type[0] = 0; 5903 mddev->chunk_sectors = 0; 5904 mddev->ctime = mddev->utime = 0; 5905 mddev->layout = 0; 5906 mddev->max_disks = 0; 5907 mddev->events = 0; 5908 mddev->can_decrease_events = 0; 5909 mddev->delta_disks = 0; 5910 mddev->reshape_backwards = 0; 5911 mddev->new_level = LEVEL_NONE; 5912 mddev->new_layout = 0; 5913 mddev->new_chunk_sectors = 0; 5914 mddev->curr_resync = 0; 5915 atomic64_set(&mddev->resync_mismatches, 0); 5916 mddev->suspend_lo = mddev->suspend_hi = 0; 5917 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5918 mddev->recovery = 0; 5919 mddev->in_sync = 0; 5920 mddev->changed = 0; 5921 mddev->degraded = 0; 5922 mddev->safemode = 0; 5923 mddev->private = NULL; 5924 mddev->cluster_info = NULL; 5925 mddev->bitmap_info.offset = 0; 5926 mddev->bitmap_info.default_offset = 0; 5927 mddev->bitmap_info.default_space = 0; 5928 mddev->bitmap_info.chunksize = 0; 5929 mddev->bitmap_info.daemon_sleep = 0; 5930 mddev->bitmap_info.max_write_behind = 0; 5931 mddev->bitmap_info.nodes = 0; 5932 } 5933 5934 static void __md_stop_writes(struct mddev *mddev) 5935 { 5936 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5937 flush_workqueue(md_misc_wq); 5938 if (mddev->sync_thread) { 5939 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5940 md_reap_sync_thread(mddev); 5941 } 5942 5943 del_timer_sync(&mddev->safemode_timer); 5944 5945 if (mddev->pers && mddev->pers->quiesce) { 5946 mddev->pers->quiesce(mddev, 1); 5947 mddev->pers->quiesce(mddev, 0); 5948 } 5949 md_bitmap_flush(mddev); 5950 5951 if (mddev->ro == 0 && 5952 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 5953 mddev->sb_flags)) { 5954 /* mark array as shutdown cleanly */ 5955 if (!mddev_is_clustered(mddev)) 5956 mddev->in_sync = 1; 5957 md_update_sb(mddev, 1); 5958 } 5959 mempool_destroy(mddev->wb_info_pool); 5960 mddev->wb_info_pool = NULL; 5961 } 5962 5963 void md_stop_writes(struct mddev *mddev) 5964 { 5965 mddev_lock_nointr(mddev); 5966 __md_stop_writes(mddev); 5967 mddev_unlock(mddev); 5968 } 5969 EXPORT_SYMBOL_GPL(md_stop_writes); 5970 5971 static void mddev_detach(struct mddev *mddev) 5972 { 5973 md_bitmap_wait_behind_writes(mddev); 5974 if (mddev->pers && mddev->pers->quiesce) { 5975 mddev->pers->quiesce(mddev, 1); 5976 mddev->pers->quiesce(mddev, 0); 5977 } 5978 md_unregister_thread(&mddev->thread); 5979 if (mddev->queue) 5980 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5981 } 5982 5983 static void __md_stop(struct mddev *mddev) 5984 { 5985 struct md_personality *pers = mddev->pers; 5986 md_bitmap_destroy(mddev); 5987 mddev_detach(mddev); 5988 /* Ensure ->event_work is done */ 5989 flush_workqueue(md_misc_wq); 5990 spin_lock(&mddev->lock); 5991 mddev->pers = NULL; 5992 spin_unlock(&mddev->lock); 5993 pers->free(mddev, mddev->private); 5994 mddev->private = NULL; 5995 if (pers->sync_request && mddev->to_remove == NULL) 5996 mddev->to_remove = &md_redundancy_group; 5997 module_put(pers->owner); 5998 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5999 } 6000 6001 void md_stop(struct mddev *mddev) 6002 { 6003 /* stop the array and free an attached data structures. 
6004 * This is called from dm-raid 6005 */ 6006 __md_stop(mddev); 6007 bioset_exit(&mddev->bio_set); 6008 bioset_exit(&mddev->sync_set); 6009 } 6010 6011 EXPORT_SYMBOL_GPL(md_stop); 6012 6013 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 6014 { 6015 int err = 0; 6016 int did_freeze = 0; 6017 6018 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6019 did_freeze = 1; 6020 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6021 md_wakeup_thread(mddev->thread); 6022 } 6023 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6024 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6025 if (mddev->sync_thread) 6026 /* Thread might be blocked waiting for metadata update 6027 * which will now never happen */ 6028 wake_up_process(mddev->sync_thread->tsk); 6029 6030 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 6031 return -EBUSY; 6032 mddev_unlock(mddev); 6033 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 6034 &mddev->recovery)); 6035 wait_event(mddev->sb_wait, 6036 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 6037 mddev_lock_nointr(mddev); 6038 6039 mutex_lock(&mddev->open_mutex); 6040 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6041 mddev->sync_thread || 6042 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6043 pr_warn("md: %s still in use.\n",mdname(mddev)); 6044 if (did_freeze) { 6045 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6046 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6047 md_wakeup_thread(mddev->thread); 6048 } 6049 err = -EBUSY; 6050 goto out; 6051 } 6052 if (mddev->pers) { 6053 __md_stop_writes(mddev); 6054 6055 err = -ENXIO; 6056 if (mddev->ro==1) 6057 goto out; 6058 mddev->ro = 1; 6059 set_disk_ro(mddev->gendisk, 1); 6060 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6061 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6062 md_wakeup_thread(mddev->thread); 6063 sysfs_notify_dirent_safe(mddev->sysfs_state); 6064 err = 0; 6065 } 6066 out: 6067 mutex_unlock(&mddev->open_mutex); 6068 return err; 6069 } 6070 6071 /* mode: 6072 * 0 - completely stop and dis-assemble array 6073 * 2 - stop but do not disassemble array 6074 */ 6075 static int do_md_stop(struct mddev *mddev, int mode, 6076 struct block_device *bdev) 6077 { 6078 struct gendisk *disk = mddev->gendisk; 6079 struct md_rdev *rdev; 6080 int did_freeze = 0; 6081 6082 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6083 did_freeze = 1; 6084 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6085 md_wakeup_thread(mddev->thread); 6086 } 6087 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6088 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6089 if (mddev->sync_thread) 6090 /* Thread might be blocked waiting for metadata update 6091 * which will now never happen */ 6092 wake_up_process(mddev->sync_thread->tsk); 6093 6094 mddev_unlock(mddev); 6095 wait_event(resync_wait, (mddev->sync_thread == NULL && 6096 !test_bit(MD_RECOVERY_RUNNING, 6097 &mddev->recovery))); 6098 mddev_lock_nointr(mddev); 6099 6100 mutex_lock(&mddev->open_mutex); 6101 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6102 mddev->sysfs_active || 6103 mddev->sync_thread || 6104 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6105 pr_warn("md: %s still in use.\n",mdname(mddev)); 6106 mutex_unlock(&mddev->open_mutex); 6107 if (did_freeze) { 6108 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6109 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6110 md_wakeup_thread(mddev->thread); 6111 } 6112 return -EBUSY; 6113 } 6114 if (mddev->pers) { 6115 if 
(mddev->ro) 6116 set_disk_ro(disk, 0); 6117 6118 __md_stop_writes(mddev); 6119 __md_stop(mddev); 6120 mddev->queue->backing_dev_info->congested_fn = NULL; 6121 6122 /* tell userspace to handle 'inactive' */ 6123 sysfs_notify_dirent_safe(mddev->sysfs_state); 6124 6125 rdev_for_each(rdev, mddev) 6126 if (rdev->raid_disk >= 0) 6127 sysfs_unlink_rdev(mddev, rdev); 6128 6129 set_capacity(disk, 0); 6130 mutex_unlock(&mddev->open_mutex); 6131 mddev->changed = 1; 6132 revalidate_disk(disk); 6133 6134 if (mddev->ro) 6135 mddev->ro = 0; 6136 } else 6137 mutex_unlock(&mddev->open_mutex); 6138 /* 6139 * Free resources if final stop 6140 */ 6141 if (mode == 0) { 6142 pr_info("md: %s stopped.\n", mdname(mddev)); 6143 6144 if (mddev->bitmap_info.file) { 6145 struct file *f = mddev->bitmap_info.file; 6146 spin_lock(&mddev->lock); 6147 mddev->bitmap_info.file = NULL; 6148 spin_unlock(&mddev->lock); 6149 fput(f); 6150 } 6151 mddev->bitmap_info.offset = 0; 6152 6153 export_array(mddev); 6154 6155 md_clean(mddev); 6156 if (mddev->hold_active == UNTIL_STOP) 6157 mddev->hold_active = 0; 6158 } 6159 md_new_event(mddev); 6160 sysfs_notify_dirent_safe(mddev->sysfs_state); 6161 return 0; 6162 } 6163 6164 #ifndef MODULE 6165 static void autorun_array(struct mddev *mddev) 6166 { 6167 struct md_rdev *rdev; 6168 int err; 6169 6170 if (list_empty(&mddev->disks)) 6171 return; 6172 6173 pr_info("md: running: "); 6174 6175 rdev_for_each(rdev, mddev) { 6176 char b[BDEVNAME_SIZE]; 6177 pr_cont("<%s>", bdevname(rdev->bdev,b)); 6178 } 6179 pr_cont("\n"); 6180 6181 err = do_md_run(mddev); 6182 if (err) { 6183 pr_warn("md: do_md_run() returned %d\n", err); 6184 do_md_stop(mddev, 0, NULL); 6185 } 6186 } 6187 6188 /* 6189 * lets try to run arrays based on all disks that have arrived 6190 * until now. (those are in pending_raid_disks) 6191 * 6192 * the method: pick the first pending disk, collect all disks with 6193 * the same UUID, remove all from the pending list and put them into 6194 * the 'same_array' list. Then order this list based on superblock 6195 * update time (freshest comes first), kick out 'old' disks and 6196 * compare superblocks. If everything's fine then run it. 6197 * 6198 * If "unit" is allocated, then bump its reference count 6199 */ 6200 static void autorun_devices(int part) 6201 { 6202 struct md_rdev *rdev0, *rdev, *tmp; 6203 struct mddev *mddev; 6204 char b[BDEVNAME_SIZE]; 6205 6206 pr_info("md: autorun ...\n"); 6207 while (!list_empty(&pending_raid_disks)) { 6208 int unit; 6209 dev_t dev; 6210 LIST_HEAD(candidates); 6211 rdev0 = list_entry(pending_raid_disks.next, 6212 struct md_rdev, same_set); 6213 6214 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b)); 6215 INIT_LIST_HEAD(&candidates); 6216 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 6217 if (super_90_load(rdev, rdev0, 0) >= 0) { 6218 pr_debug("md: adding %s ...\n", 6219 bdevname(rdev->bdev,b)); 6220 list_move(&rdev->same_set, &candidates); 6221 } 6222 /* 6223 * now we have a set of devices, with all of them having 6224 * mostly sane superblocks. It's time to allocate the 6225 * mddev. 
6226 */ 6227 if (part) { 6228 dev = MKDEV(mdp_major, 6229 rdev0->preferred_minor << MdpMinorShift); 6230 unit = MINOR(dev) >> MdpMinorShift; 6231 } else { 6232 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 6233 unit = MINOR(dev); 6234 } 6235 if (rdev0->preferred_minor != unit) { 6236 pr_warn("md: unit number in %s is bad: %d\n", 6237 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 6238 break; 6239 } 6240 6241 md_probe(dev, NULL, NULL); 6242 mddev = mddev_find(dev); 6243 if (!mddev || !mddev->gendisk) { 6244 if (mddev) 6245 mddev_put(mddev); 6246 break; 6247 } 6248 if (mddev_lock(mddev)) 6249 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 6250 else if (mddev->raid_disks || mddev->major_version 6251 || !list_empty(&mddev->disks)) { 6252 pr_warn("md: %s already running, cannot run %s\n", 6253 mdname(mddev), bdevname(rdev0->bdev,b)); 6254 mddev_unlock(mddev); 6255 } else { 6256 pr_debug("md: created %s\n", mdname(mddev)); 6257 mddev->persistent = 1; 6258 rdev_for_each_list(rdev, tmp, &candidates) { 6259 list_del_init(&rdev->same_set); 6260 if (bind_rdev_to_array(rdev, mddev)) 6261 export_rdev(rdev); 6262 } 6263 autorun_array(mddev); 6264 mddev_unlock(mddev); 6265 } 6266 /* on success, candidates will be empty, on error 6267 * it won't... 6268 */ 6269 rdev_for_each_list(rdev, tmp, &candidates) { 6270 list_del_init(&rdev->same_set); 6271 export_rdev(rdev); 6272 } 6273 mddev_put(mddev); 6274 } 6275 pr_info("md: ... autorun DONE.\n"); 6276 } 6277 #endif /* !MODULE */ 6278 6279 static int get_version(void __user *arg) 6280 { 6281 mdu_version_t ver; 6282 6283 ver.major = MD_MAJOR_VERSION; 6284 ver.minor = MD_MINOR_VERSION; 6285 ver.patchlevel = MD_PATCHLEVEL_VERSION; 6286 6287 if (copy_to_user(arg, &ver, sizeof(ver))) 6288 return -EFAULT; 6289 6290 return 0; 6291 } 6292 6293 static int get_array_info(struct mddev *mddev, void __user *arg) 6294 { 6295 mdu_array_info_t info; 6296 int nr,working,insync,failed,spare; 6297 struct md_rdev *rdev; 6298 6299 nr = working = insync = failed = spare = 0; 6300 rcu_read_lock(); 6301 rdev_for_each_rcu(rdev, mddev) { 6302 nr++; 6303 if (test_bit(Faulty, &rdev->flags)) 6304 failed++; 6305 else { 6306 working++; 6307 if (test_bit(In_sync, &rdev->flags)) 6308 insync++; 6309 else if (test_bit(Journal, &rdev->flags)) 6310 /* TODO: add journal count to md_u.h */ 6311 ; 6312 else 6313 spare++; 6314 } 6315 } 6316 rcu_read_unlock(); 6317 6318 info.major_version = mddev->major_version; 6319 info.minor_version = mddev->minor_version; 6320 info.patch_version = MD_PATCHLEVEL_VERSION; 6321 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 6322 info.level = mddev->level; 6323 info.size = mddev->dev_sectors / 2; 6324 if (info.size != mddev->dev_sectors / 2) /* overflow */ 6325 info.size = -1; 6326 info.nr_disks = nr; 6327 info.raid_disks = mddev->raid_disks; 6328 info.md_minor = mddev->md_minor; 6329 info.not_persistent= !mddev->persistent; 6330 6331 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 6332 info.state = 0; 6333 if (mddev->in_sync) 6334 info.state = (1<<MD_SB_CLEAN); 6335 if (mddev->bitmap && mddev->bitmap_info.offset) 6336 info.state |= (1<<MD_SB_BITMAP_PRESENT); 6337 if (mddev_is_clustered(mddev)) 6338 info.state |= (1<<MD_SB_CLUSTERED); 6339 info.active_disks = insync; 6340 info.working_disks = working; 6341 info.failed_disks = failed; 6342 info.spare_disks = spare; 6343 6344 info.layout = mddev->layout; 6345 info.chunk_size = mddev->chunk_sectors << 9; 6346 6347 if (copy_to_user(arg, &info, sizeof(info))) 6348 return -EFAULT; 6349 6350 
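	/*
	 * For reference, a minimal user-space reader of this ioctl could look
	 * like the sketch below (illustrative only; the device path and the
	 * lack of error handling are assumptions, not something this driver
	 * defines):
	 *
	 *      #include <fcntl.h>
	 *      #include <stdio.h>
	 *      #include <sys/ioctl.h>
	 *      #include <linux/raid/md_u.h>
	 *
	 *      int main(void)
	 *      {
	 *              mdu_array_info_t info;
	 *              int fd = open("/dev/md0", O_RDONLY);  // example path
	 *
	 *              if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0)
	 *                      return 1;
	 *              printf("level %d raid_disks %d active %d failed %d spare %d\n",
	 *                     info.level, info.raid_disks, info.active_disks,
	 *                     info.failed_disks, info.spare_disks);
	 *              return 0;
	 *      }
	 */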
return 0; 6351 } 6352 6353 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 6354 { 6355 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 6356 char *ptr; 6357 int err; 6358 6359 file = kzalloc(sizeof(*file), GFP_NOIO); 6360 if (!file) 6361 return -ENOMEM; 6362 6363 err = 0; 6364 spin_lock(&mddev->lock); 6365 /* bitmap enabled */ 6366 if (mddev->bitmap_info.file) { 6367 ptr = file_path(mddev->bitmap_info.file, file->pathname, 6368 sizeof(file->pathname)); 6369 if (IS_ERR(ptr)) 6370 err = PTR_ERR(ptr); 6371 else 6372 memmove(file->pathname, ptr, 6373 sizeof(file->pathname)-(ptr-file->pathname)); 6374 } 6375 spin_unlock(&mddev->lock); 6376 6377 if (err == 0 && 6378 copy_to_user(arg, file, sizeof(*file))) 6379 err = -EFAULT; 6380 6381 kfree(file); 6382 return err; 6383 } 6384 6385 static int get_disk_info(struct mddev *mddev, void __user * arg) 6386 { 6387 mdu_disk_info_t info; 6388 struct md_rdev *rdev; 6389 6390 if (copy_from_user(&info, arg, sizeof(info))) 6391 return -EFAULT; 6392 6393 rcu_read_lock(); 6394 rdev = md_find_rdev_nr_rcu(mddev, info.number); 6395 if (rdev) { 6396 info.major = MAJOR(rdev->bdev->bd_dev); 6397 info.minor = MINOR(rdev->bdev->bd_dev); 6398 info.raid_disk = rdev->raid_disk; 6399 info.state = 0; 6400 if (test_bit(Faulty, &rdev->flags)) 6401 info.state |= (1<<MD_DISK_FAULTY); 6402 else if (test_bit(In_sync, &rdev->flags)) { 6403 info.state |= (1<<MD_DISK_ACTIVE); 6404 info.state |= (1<<MD_DISK_SYNC); 6405 } 6406 if (test_bit(Journal, &rdev->flags)) 6407 info.state |= (1<<MD_DISK_JOURNAL); 6408 if (test_bit(WriteMostly, &rdev->flags)) 6409 info.state |= (1<<MD_DISK_WRITEMOSTLY); 6410 if (test_bit(FailFast, &rdev->flags)) 6411 info.state |= (1<<MD_DISK_FAILFAST); 6412 } else { 6413 info.major = info.minor = 0; 6414 info.raid_disk = -1; 6415 info.state = (1<<MD_DISK_REMOVED); 6416 } 6417 rcu_read_unlock(); 6418 6419 if (copy_to_user(arg, &info, sizeof(info))) 6420 return -EFAULT; 6421 6422 return 0; 6423 } 6424 6425 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) 6426 { 6427 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 6428 struct md_rdev *rdev; 6429 dev_t dev = MKDEV(info->major,info->minor); 6430 6431 if (mddev_is_clustered(mddev) && 6432 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 6433 pr_warn("%s: Cannot add to clustered mddev.\n", 6434 mdname(mddev)); 6435 return -EINVAL; 6436 } 6437 6438 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 6439 return -EOVERFLOW; 6440 6441 if (!mddev->raid_disks) { 6442 int err; 6443 /* expecting a device which has a superblock */ 6444 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 6445 if (IS_ERR(rdev)) { 6446 pr_warn("md: md_import_device returned %ld\n", 6447 PTR_ERR(rdev)); 6448 return PTR_ERR(rdev); 6449 } 6450 if (!list_empty(&mddev->disks)) { 6451 struct md_rdev *rdev0 6452 = list_entry(mddev->disks.next, 6453 struct md_rdev, same_set); 6454 err = super_types[mddev->major_version] 6455 .load_super(rdev, rdev0, mddev->minor_version); 6456 if (err < 0) { 6457 pr_warn("md: %s has different UUID to %s\n", 6458 bdevname(rdev->bdev,b), 6459 bdevname(rdev0->bdev,b2)); 6460 export_rdev(rdev); 6461 return -EINVAL; 6462 } 6463 } 6464 err = bind_rdev_to_array(rdev, mddev); 6465 if (err) 6466 export_rdev(rdev); 6467 return err; 6468 } 6469 6470 /* 6471 * add_new_disk can be used once the array is assembled 6472 * to add "hot spares". 
They must already have a superblock 6473 * written 6474 */ 6475 if (mddev->pers) { 6476 int err; 6477 if (!mddev->pers->hot_add_disk) { 6478 pr_warn("%s: personality does not support diskops!\n", 6479 mdname(mddev)); 6480 return -EINVAL; 6481 } 6482 if (mddev->persistent) 6483 rdev = md_import_device(dev, mddev->major_version, 6484 mddev->minor_version); 6485 else 6486 rdev = md_import_device(dev, -1, -1); 6487 if (IS_ERR(rdev)) { 6488 pr_warn("md: md_import_device returned %ld\n", 6489 PTR_ERR(rdev)); 6490 return PTR_ERR(rdev); 6491 } 6492 /* set saved_raid_disk if appropriate */ 6493 if (!mddev->persistent) { 6494 if (info->state & (1<<MD_DISK_SYNC) && 6495 info->raid_disk < mddev->raid_disks) { 6496 rdev->raid_disk = info->raid_disk; 6497 set_bit(In_sync, &rdev->flags); 6498 clear_bit(Bitmap_sync, &rdev->flags); 6499 } else 6500 rdev->raid_disk = -1; 6501 rdev->saved_raid_disk = rdev->raid_disk; 6502 } else 6503 super_types[mddev->major_version]. 6504 validate_super(mddev, rdev); 6505 if ((info->state & (1<<MD_DISK_SYNC)) && 6506 rdev->raid_disk != info->raid_disk) { 6507 /* This was a hot-add request, but events doesn't 6508 * match, so reject it. 6509 */ 6510 export_rdev(rdev); 6511 return -EINVAL; 6512 } 6513 6514 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 6515 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6516 set_bit(WriteMostly, &rdev->flags); 6517 else 6518 clear_bit(WriteMostly, &rdev->flags); 6519 if (info->state & (1<<MD_DISK_FAILFAST)) 6520 set_bit(FailFast, &rdev->flags); 6521 else 6522 clear_bit(FailFast, &rdev->flags); 6523 6524 if (info->state & (1<<MD_DISK_JOURNAL)) { 6525 struct md_rdev *rdev2; 6526 bool has_journal = false; 6527 6528 /* make sure no existing journal disk */ 6529 rdev_for_each(rdev2, mddev) { 6530 if (test_bit(Journal, &rdev2->flags)) { 6531 has_journal = true; 6532 break; 6533 } 6534 } 6535 if (has_journal || mddev->bitmap) { 6536 export_rdev(rdev); 6537 return -EBUSY; 6538 } 6539 set_bit(Journal, &rdev->flags); 6540 } 6541 /* 6542 * check whether the device shows up in other nodes 6543 */ 6544 if (mddev_is_clustered(mddev)) { 6545 if (info->state & (1 << MD_DISK_CANDIDATE)) 6546 set_bit(Candidate, &rdev->flags); 6547 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 6548 /* --add initiated by this node */ 6549 err = md_cluster_ops->add_new_disk(mddev, rdev); 6550 if (err) { 6551 export_rdev(rdev); 6552 return err; 6553 } 6554 } 6555 } 6556 6557 rdev->raid_disk = -1; 6558 err = bind_rdev_to_array(rdev, mddev); 6559 6560 if (err) 6561 export_rdev(rdev); 6562 6563 if (mddev_is_clustered(mddev)) { 6564 if (info->state & (1 << MD_DISK_CANDIDATE)) { 6565 if (!err) { 6566 err = md_cluster_ops->new_disk_ack(mddev, 6567 err == 0); 6568 if (err) 6569 md_kick_rdev_from_array(rdev); 6570 } 6571 } else { 6572 if (err) 6573 md_cluster_ops->add_new_disk_cancel(mddev); 6574 else 6575 err = add_bound_rdev(rdev); 6576 } 6577 6578 } else if (!err) 6579 err = add_bound_rdev(rdev); 6580 6581 return err; 6582 } 6583 6584 /* otherwise, add_new_disk is only allowed 6585 * for major_version==0 superblocks 6586 */ 6587 if (mddev->major_version != 0) { 6588 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 6589 return -EINVAL; 6590 } 6591 6592 if (!(info->state & (1<<MD_DISK_FAULTY))) { 6593 int err; 6594 rdev = md_import_device(dev, -1, 0); 6595 if (IS_ERR(rdev)) { 6596 pr_warn("md: error, md_import_device() returned %ld\n", 6597 PTR_ERR(rdev)); 6598 return PTR_ERR(rdev); 6599 } 6600 rdev->desc_nr = info->number; 6601 if (info->raid_disk < 
mddev->raid_disks) 6602 rdev->raid_disk = info->raid_disk; 6603 else 6604 rdev->raid_disk = -1; 6605 6606 if (rdev->raid_disk < mddev->raid_disks) 6607 if (info->state & (1<<MD_DISK_SYNC)) 6608 set_bit(In_sync, &rdev->flags); 6609 6610 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6611 set_bit(WriteMostly, &rdev->flags); 6612 if (info->state & (1<<MD_DISK_FAILFAST)) 6613 set_bit(FailFast, &rdev->flags); 6614 6615 if (!mddev->persistent) { 6616 pr_debug("md: nonpersistent superblock ...\n"); 6617 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6618 } else 6619 rdev->sb_start = calc_dev_sboffset(rdev); 6620 rdev->sectors = rdev->sb_start; 6621 6622 err = bind_rdev_to_array(rdev, mddev); 6623 if (err) { 6624 export_rdev(rdev); 6625 return err; 6626 } 6627 } 6628 6629 return 0; 6630 } 6631 6632 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6633 { 6634 char b[BDEVNAME_SIZE]; 6635 struct md_rdev *rdev; 6636 6637 if (!mddev->pers) 6638 return -ENODEV; 6639 6640 rdev = find_rdev(mddev, dev); 6641 if (!rdev) 6642 return -ENXIO; 6643 6644 if (rdev->raid_disk < 0) 6645 goto kick_rdev; 6646 6647 clear_bit(Blocked, &rdev->flags); 6648 remove_and_add_spares(mddev, rdev); 6649 6650 if (rdev->raid_disk >= 0) 6651 goto busy; 6652 6653 kick_rdev: 6654 if (mddev_is_clustered(mddev)) 6655 md_cluster_ops->remove_disk(mddev, rdev); 6656 6657 md_kick_rdev_from_array(rdev); 6658 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6659 if (mddev->thread) 6660 md_wakeup_thread(mddev->thread); 6661 else 6662 md_update_sb(mddev, 1); 6663 md_new_event(mddev); 6664 6665 return 0; 6666 busy: 6667 pr_debug("md: cannot remove active disk %s from %s ...\n", 6668 bdevname(rdev->bdev,b), mdname(mddev)); 6669 return -EBUSY; 6670 } 6671 6672 static int hot_add_disk(struct mddev *mddev, dev_t dev) 6673 { 6674 char b[BDEVNAME_SIZE]; 6675 int err; 6676 struct md_rdev *rdev; 6677 6678 if (!mddev->pers) 6679 return -ENODEV; 6680 6681 if (mddev->major_version != 0) { 6682 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 6683 mdname(mddev)); 6684 return -EINVAL; 6685 } 6686 if (!mddev->pers->hot_add_disk) { 6687 pr_warn("%s: personality does not support diskops!\n", 6688 mdname(mddev)); 6689 return -EINVAL; 6690 } 6691 6692 rdev = md_import_device(dev, -1, 0); 6693 if (IS_ERR(rdev)) { 6694 pr_warn("md: error, md_import_device() returned %ld\n", 6695 PTR_ERR(rdev)); 6696 return -EINVAL; 6697 } 6698 6699 if (mddev->persistent) 6700 rdev->sb_start = calc_dev_sboffset(rdev); 6701 else 6702 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6703 6704 rdev->sectors = rdev->sb_start; 6705 6706 if (test_bit(Faulty, &rdev->flags)) { 6707 pr_warn("md: can not hot-add faulty %s disk to %s!\n", 6708 bdevname(rdev->bdev,b), mdname(mddev)); 6709 err = -EINVAL; 6710 goto abort_export; 6711 } 6712 6713 clear_bit(In_sync, &rdev->flags); 6714 rdev->desc_nr = -1; 6715 rdev->saved_raid_disk = -1; 6716 err = bind_rdev_to_array(rdev, mddev); 6717 if (err) 6718 goto abort_export; 6719 6720 /* 6721 * The rest should better be atomic, we can have disk failures 6722 * noticed in interrupt contexts ... 6723 */ 6724 6725 rdev->raid_disk = -1; 6726 6727 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6728 if (!mddev->thread) 6729 md_update_sb(mddev, 1); 6730 /* 6731 * Kick recovery, maybe this spare has to be added to the 6732 * array immediately. 
 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}

static int set_bitmap_file(struct mddev *mddev, int fd)
{
	int err = 0;

	if (mddev->pers) {
		if (!mddev->pers->quiesce || !mddev->thread)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		struct inode *inode;
		struct file *f;

		if (mddev->bitmap || mddev->bitmap_info.file)
			return -EEXIST; /* cannot add when bitmap is present */
		f = fget(fd);

		if (f == NULL) {
			pr_warn("%s: error: failed to get bitmap file\n",
				mdname(mddev));
			return -EBADF;
		}

		inode = f->f_mapping->host;
		if (!S_ISREG(inode->i_mode)) {
			pr_warn("%s: error: bitmap file must be a regular file\n",
				mdname(mddev));
			err = -EBADF;
		} else if (!(f->f_mode & FMODE_WRITE)) {
			pr_warn("%s: error: bitmap file must be open for write\n",
				mdname(mddev));
			err = -EBADF;
		} else if (atomic_read(&inode->i_writecount) != 1) {
			pr_warn("%s: error: bitmap file is already in use\n",
				mdname(mddev));
			err = -EBUSY;
		}
		if (err) {
			fput(f);
			return err;
		}
		mddev->bitmap_info.file = f;
		mddev->bitmap_info.offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		if (fd >= 0) {
			struct bitmap *bitmap;

			bitmap = md_bitmap_create(mddev, -1);
			mddev_suspend(mddev);
			if (!IS_ERR(bitmap)) {
				mddev->bitmap = bitmap;
				err = md_bitmap_load(mddev);
			} else
				err = PTR_ERR(bitmap);
			if (err) {
				md_bitmap_destroy(mddev);
				fd = -1;
			}
			mddev_resume(mddev);
		} else if (fd < 0) {
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		}
	}
	if (fd < 0) {
		struct file *f = mddev->bitmap_info.file;
		if (f) {
			spin_lock(&mddev->lock);
			mddev->bitmap_info.file = NULL;
			spin_unlock(&mddev->lock);
			fput(f);
		}
	}

	return err;
}

/*
 * set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 * level, size, not_persistent, layout, chunksize determine the
 * shape of the array.
 * This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 * In this case raid_disks will be 0, and the major_version field is
 * used to determine which style super-blocks are to be found on the devices.
 * The minor and patch _version numbers are also kept in case the
 * super_block handler wishes to interpret them.
 */
static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{

	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module?
*/ 6850 pr_warn("md: superblock version %d not known\n", 6851 info->major_version); 6852 return -EINVAL; 6853 } 6854 mddev->major_version = info->major_version; 6855 mddev->minor_version = info->minor_version; 6856 mddev->patch_version = info->patch_version; 6857 mddev->persistent = !info->not_persistent; 6858 /* ensure mddev_put doesn't delete this now that there 6859 * is some minimal configuration. 6860 */ 6861 mddev->ctime = ktime_get_real_seconds(); 6862 return 0; 6863 } 6864 mddev->major_version = MD_MAJOR_VERSION; 6865 mddev->minor_version = MD_MINOR_VERSION; 6866 mddev->patch_version = MD_PATCHLEVEL_VERSION; 6867 mddev->ctime = ktime_get_real_seconds(); 6868 6869 mddev->level = info->level; 6870 mddev->clevel[0] = 0; 6871 mddev->dev_sectors = 2 * (sector_t)info->size; 6872 mddev->raid_disks = info->raid_disks; 6873 /* don't set md_minor, it is determined by which /dev/md* was 6874 * openned 6875 */ 6876 if (info->state & (1<<MD_SB_CLEAN)) 6877 mddev->recovery_cp = MaxSector; 6878 else 6879 mddev->recovery_cp = 0; 6880 mddev->persistent = ! info->not_persistent; 6881 mddev->external = 0; 6882 6883 mddev->layout = info->layout; 6884 mddev->chunk_sectors = info->chunk_size >> 9; 6885 6886 if (mddev->persistent) { 6887 mddev->max_disks = MD_SB_DISKS; 6888 mddev->flags = 0; 6889 mddev->sb_flags = 0; 6890 } 6891 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6892 6893 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 6894 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 6895 mddev->bitmap_info.offset = 0; 6896 6897 mddev->reshape_position = MaxSector; 6898 6899 /* 6900 * Generate a 128 bit UUID 6901 */ 6902 get_random_bytes(mddev->uuid, 16); 6903 6904 mddev->new_level = mddev->level; 6905 mddev->new_chunk_sectors = mddev->chunk_sectors; 6906 mddev->new_layout = mddev->layout; 6907 mddev->delta_disks = 0; 6908 mddev->reshape_backwards = 0; 6909 6910 return 0; 6911 } 6912 6913 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 6914 { 6915 lockdep_assert_held(&mddev->reconfig_mutex); 6916 6917 if (mddev->external_size) 6918 return; 6919 6920 mddev->array_sectors = array_sectors; 6921 } 6922 EXPORT_SYMBOL(md_set_array_sectors); 6923 6924 static int update_size(struct mddev *mddev, sector_t num_sectors) 6925 { 6926 struct md_rdev *rdev; 6927 int rv; 6928 int fit = (num_sectors == 0); 6929 sector_t old_dev_sectors = mddev->dev_sectors; 6930 6931 if (mddev->pers->resize == NULL) 6932 return -EINVAL; 6933 /* The "num_sectors" is the number of sectors of each device that 6934 * is used. This can only make sense for arrays with redundancy. 6935 * linear and raid0 always use whatever space is available. We can only 6936 * consider changing this number if no resync or reconstruction is 6937 * happening, and if the new size is acceptable. It must fit before the 6938 * sb_start or, if that is <data_offset, it must fit before the size 6939 * of each device. If num_sectors is zero, we find the largest size 6940 * that fits. 
6941 */ 6942 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6943 mddev->sync_thread) 6944 return -EBUSY; 6945 if (mddev->ro) 6946 return -EROFS; 6947 6948 rdev_for_each(rdev, mddev) { 6949 sector_t avail = rdev->sectors; 6950 6951 if (fit && (num_sectors == 0 || num_sectors > avail)) 6952 num_sectors = avail; 6953 if (avail < num_sectors) 6954 return -ENOSPC; 6955 } 6956 rv = mddev->pers->resize(mddev, num_sectors); 6957 if (!rv) { 6958 if (mddev_is_clustered(mddev)) 6959 md_cluster_ops->update_size(mddev, old_dev_sectors); 6960 else if (mddev->queue) { 6961 set_capacity(mddev->gendisk, mddev->array_sectors); 6962 revalidate_disk(mddev->gendisk); 6963 } 6964 } 6965 return rv; 6966 } 6967 6968 static int update_raid_disks(struct mddev *mddev, int raid_disks) 6969 { 6970 int rv; 6971 struct md_rdev *rdev; 6972 /* change the number of raid disks */ 6973 if (mddev->pers->check_reshape == NULL) 6974 return -EINVAL; 6975 if (mddev->ro) 6976 return -EROFS; 6977 if (raid_disks <= 0 || 6978 (mddev->max_disks && raid_disks >= mddev->max_disks)) 6979 return -EINVAL; 6980 if (mddev->sync_thread || 6981 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6982 mddev->reshape_position != MaxSector) 6983 return -EBUSY; 6984 6985 rdev_for_each(rdev, mddev) { 6986 if (mddev->raid_disks < raid_disks && 6987 rdev->data_offset < rdev->new_data_offset) 6988 return -EINVAL; 6989 if (mddev->raid_disks > raid_disks && 6990 rdev->data_offset > rdev->new_data_offset) 6991 return -EINVAL; 6992 } 6993 6994 mddev->delta_disks = raid_disks - mddev->raid_disks; 6995 if (mddev->delta_disks < 0) 6996 mddev->reshape_backwards = 1; 6997 else if (mddev->delta_disks > 0) 6998 mddev->reshape_backwards = 0; 6999 7000 rv = mddev->pers->check_reshape(mddev); 7001 if (rv < 0) { 7002 mddev->delta_disks = 0; 7003 mddev->reshape_backwards = 0; 7004 } 7005 return rv; 7006 } 7007 7008 /* 7009 * update_array_info is used to change the configuration of an 7010 * on-line array. 7011 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 7012 * fields in the info are checked against the array. 7013 * Any differences that cannot be handled will cause an error. 7014 * Normally, only one change can be managed at a time. 
7015 */ 7016 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 7017 { 7018 int rv = 0; 7019 int cnt = 0; 7020 int state = 0; 7021 7022 /* calculate expected state,ignoring low bits */ 7023 if (mddev->bitmap && mddev->bitmap_info.offset) 7024 state |= (1 << MD_SB_BITMAP_PRESENT); 7025 7026 if (mddev->major_version != info->major_version || 7027 mddev->minor_version != info->minor_version || 7028 /* mddev->patch_version != info->patch_version || */ 7029 mddev->ctime != info->ctime || 7030 mddev->level != info->level || 7031 /* mddev->layout != info->layout || */ 7032 mddev->persistent != !info->not_persistent || 7033 mddev->chunk_sectors != info->chunk_size >> 9 || 7034 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 7035 ((state^info->state) & 0xfffffe00) 7036 ) 7037 return -EINVAL; 7038 /* Check there is only one change */ 7039 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7040 cnt++; 7041 if (mddev->raid_disks != info->raid_disks) 7042 cnt++; 7043 if (mddev->layout != info->layout) 7044 cnt++; 7045 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 7046 cnt++; 7047 if (cnt == 0) 7048 return 0; 7049 if (cnt > 1) 7050 return -EINVAL; 7051 7052 if (mddev->layout != info->layout) { 7053 /* Change layout 7054 * we don't need to do anything at the md level, the 7055 * personality will take care of it all. 7056 */ 7057 if (mddev->pers->check_reshape == NULL) 7058 return -EINVAL; 7059 else { 7060 mddev->new_layout = info->layout; 7061 rv = mddev->pers->check_reshape(mddev); 7062 if (rv) 7063 mddev->new_layout = mddev->layout; 7064 return rv; 7065 } 7066 } 7067 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7068 rv = update_size(mddev, (sector_t)info->size * 2); 7069 7070 if (mddev->raid_disks != info->raid_disks) 7071 rv = update_raid_disks(mddev, info->raid_disks); 7072 7073 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 7074 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 7075 rv = -EINVAL; 7076 goto err; 7077 } 7078 if (mddev->recovery || mddev->sync_thread) { 7079 rv = -EBUSY; 7080 goto err; 7081 } 7082 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 7083 struct bitmap *bitmap; 7084 /* add the bitmap */ 7085 if (mddev->bitmap) { 7086 rv = -EEXIST; 7087 goto err; 7088 } 7089 if (mddev->bitmap_info.default_offset == 0) { 7090 rv = -EINVAL; 7091 goto err; 7092 } 7093 mddev->bitmap_info.offset = 7094 mddev->bitmap_info.default_offset; 7095 mddev->bitmap_info.space = 7096 mddev->bitmap_info.default_space; 7097 bitmap = md_bitmap_create(mddev, -1); 7098 mddev_suspend(mddev); 7099 if (!IS_ERR(bitmap)) { 7100 mddev->bitmap = bitmap; 7101 rv = md_bitmap_load(mddev); 7102 } else 7103 rv = PTR_ERR(bitmap); 7104 if (rv) 7105 md_bitmap_destroy(mddev); 7106 mddev_resume(mddev); 7107 } else { 7108 /* remove the bitmap */ 7109 if (!mddev->bitmap) { 7110 rv = -ENOENT; 7111 goto err; 7112 } 7113 if (mddev->bitmap->storage.file) { 7114 rv = -EINVAL; 7115 goto err; 7116 } 7117 if (mddev->bitmap_info.nodes) { 7118 /* hold PW on all the bitmap lock */ 7119 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { 7120 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 7121 rv = -EPERM; 7122 md_cluster_ops->unlock_all_bitmaps(mddev); 7123 goto err; 7124 } 7125 7126 mddev->bitmap_info.nodes = 0; 7127 md_cluster_ops->leave(mddev); 7128 } 7129 mddev_suspend(mddev); 7130 md_bitmap_destroy(mddev); 7131 mddev_resume(mddev); 7132 mddev->bitmap_info.offset = 0; 7133 } 7134 } 7135 
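	/*
	 * The bitmap add/remove branch above is driven from user space by
	 * re-submitting the current array info with only the
	 * MD_SB_BITMAP_PRESENT bit of ->state toggled, roughly (a sketch,
	 * assuming an already-open md device fd; not a complete program):
	 *
	 *      mdu_array_info_t info;
	 *
	 *      ioctl(fd, GET_ARRAY_INFO, &info);
	 *      info.state |= (1 << MD_SB_BITMAP_PRESENT);  // request internal bitmap
	 *      ioctl(fd, SET_ARRAY_INFO, &info);
	 *
	 * Clearing the bit instead asks for the bitmap to be removed, subject
	 * to the checks above (internal bitmap only, not in use by other
	 * cluster nodes, a valid default offset, and no other change in the
	 * same call).
	 */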
md_update_sb(mddev, 1); 7136 return rv; 7137 err: 7138 return rv; 7139 } 7140 7141 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 7142 { 7143 struct md_rdev *rdev; 7144 int err = 0; 7145 7146 if (mddev->pers == NULL) 7147 return -ENODEV; 7148 7149 rcu_read_lock(); 7150 rdev = md_find_rdev_rcu(mddev, dev); 7151 if (!rdev) 7152 err = -ENODEV; 7153 else { 7154 md_error(mddev, rdev); 7155 if (!test_bit(Faulty, &rdev->flags)) 7156 err = -EBUSY; 7157 } 7158 rcu_read_unlock(); 7159 return err; 7160 } 7161 7162 /* 7163 * We have a problem here : there is no easy way to give a CHS 7164 * virtual geometry. We currently pretend that we have a 2 heads 7165 * 4 sectors (with a BIG number of cylinders...). This drives 7166 * dosfs just mad... ;-) 7167 */ 7168 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 7169 { 7170 struct mddev *mddev = bdev->bd_disk->private_data; 7171 7172 geo->heads = 2; 7173 geo->sectors = 4; 7174 geo->cylinders = mddev->array_sectors / 8; 7175 return 0; 7176 } 7177 7178 static inline bool md_ioctl_valid(unsigned int cmd) 7179 { 7180 switch (cmd) { 7181 case ADD_NEW_DISK: 7182 case BLKROSET: 7183 case GET_ARRAY_INFO: 7184 case GET_BITMAP_FILE: 7185 case GET_DISK_INFO: 7186 case HOT_ADD_DISK: 7187 case HOT_REMOVE_DISK: 7188 case RAID_AUTORUN: 7189 case RAID_VERSION: 7190 case RESTART_ARRAY_RW: 7191 case RUN_ARRAY: 7192 case SET_ARRAY_INFO: 7193 case SET_BITMAP_FILE: 7194 case SET_DISK_FAULTY: 7195 case STOP_ARRAY: 7196 case STOP_ARRAY_RO: 7197 case CLUSTERED_DISK_NACK: 7198 return true; 7199 default: 7200 return false; 7201 } 7202 } 7203 7204 static int md_ioctl(struct block_device *bdev, fmode_t mode, 7205 unsigned int cmd, unsigned long arg) 7206 { 7207 int err = 0; 7208 void __user *argp = (void __user *)arg; 7209 struct mddev *mddev = NULL; 7210 int ro; 7211 bool did_set_md_closing = false; 7212 7213 if (!md_ioctl_valid(cmd)) 7214 return -ENOTTY; 7215 7216 switch (cmd) { 7217 case RAID_VERSION: 7218 case GET_ARRAY_INFO: 7219 case GET_DISK_INFO: 7220 break; 7221 default: 7222 if (!capable(CAP_SYS_ADMIN)) 7223 return -EACCES; 7224 } 7225 7226 /* 7227 * Commands dealing with the RAID driver but not any 7228 * particular array: 7229 */ 7230 switch (cmd) { 7231 case RAID_VERSION: 7232 err = get_version(argp); 7233 goto out; 7234 7235 #ifndef MODULE 7236 case RAID_AUTORUN: 7237 err = 0; 7238 autostart_arrays(arg); 7239 goto out; 7240 #endif 7241 default:; 7242 } 7243 7244 /* 7245 * Commands creating/starting a new array: 7246 */ 7247 7248 mddev = bdev->bd_disk->private_data; 7249 7250 if (!mddev) { 7251 BUG(); 7252 goto out; 7253 } 7254 7255 /* Some actions do not requires the mutex */ 7256 switch (cmd) { 7257 case GET_ARRAY_INFO: 7258 if (!mddev->raid_disks && !mddev->external) 7259 err = -ENODEV; 7260 else 7261 err = get_array_info(mddev, argp); 7262 goto out; 7263 7264 case GET_DISK_INFO: 7265 if (!mddev->raid_disks && !mddev->external) 7266 err = -ENODEV; 7267 else 7268 err = get_disk_info(mddev, argp); 7269 goto out; 7270 7271 case SET_DISK_FAULTY: 7272 err = set_disk_faulty(mddev, new_decode_dev(arg)); 7273 goto out; 7274 7275 case GET_BITMAP_FILE: 7276 err = get_bitmap_file(mddev, argp); 7277 goto out; 7278 7279 } 7280 7281 if (cmd == ADD_NEW_DISK) 7282 /* need to ensure md_delayed_delete() has completed */ 7283 flush_workqueue(md_misc_wq); 7284 7285 if (cmd == HOT_REMOVE_DISK) 7286 /* need to ensure recovery thread has run */ 7287 wait_event_interruptible_timeout(mddev->sb_wait, 7288 !test_bit(MD_RECOVERY_NEEDED, 7289 &mddev->recovery), 
7290 msecs_to_jiffies(5000)); 7291 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 7292 /* Need to flush page cache, and ensure no-one else opens 7293 * and writes 7294 */ 7295 mutex_lock(&mddev->open_mutex); 7296 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 7297 mutex_unlock(&mddev->open_mutex); 7298 err = -EBUSY; 7299 goto out; 7300 } 7301 WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); 7302 set_bit(MD_CLOSING, &mddev->flags); 7303 did_set_md_closing = true; 7304 mutex_unlock(&mddev->open_mutex); 7305 sync_blockdev(bdev); 7306 } 7307 err = mddev_lock(mddev); 7308 if (err) { 7309 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 7310 err, cmd); 7311 goto out; 7312 } 7313 7314 if (cmd == SET_ARRAY_INFO) { 7315 mdu_array_info_t info; 7316 if (!arg) 7317 memset(&info, 0, sizeof(info)); 7318 else if (copy_from_user(&info, argp, sizeof(info))) { 7319 err = -EFAULT; 7320 goto unlock; 7321 } 7322 if (mddev->pers) { 7323 err = update_array_info(mddev, &info); 7324 if (err) { 7325 pr_warn("md: couldn't update array info. %d\n", err); 7326 goto unlock; 7327 } 7328 goto unlock; 7329 } 7330 if (!list_empty(&mddev->disks)) { 7331 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 7332 err = -EBUSY; 7333 goto unlock; 7334 } 7335 if (mddev->raid_disks) { 7336 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 7337 err = -EBUSY; 7338 goto unlock; 7339 } 7340 err = set_array_info(mddev, &info); 7341 if (err) { 7342 pr_warn("md: couldn't set array info. %d\n", err); 7343 goto unlock; 7344 } 7345 goto unlock; 7346 } 7347 7348 /* 7349 * Commands querying/configuring an existing array: 7350 */ 7351 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 7352 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 7353 if ((!mddev->raid_disks && !mddev->external) 7354 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 7355 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 7356 && cmd != GET_BITMAP_FILE) { 7357 err = -ENODEV; 7358 goto unlock; 7359 } 7360 7361 /* 7362 * Commands even a read-only array can execute: 7363 */ 7364 switch (cmd) { 7365 case RESTART_ARRAY_RW: 7366 err = restart_array(mddev); 7367 goto unlock; 7368 7369 case STOP_ARRAY: 7370 err = do_md_stop(mddev, 0, bdev); 7371 goto unlock; 7372 7373 case STOP_ARRAY_RO: 7374 err = md_set_readonly(mddev, bdev); 7375 goto unlock; 7376 7377 case HOT_REMOVE_DISK: 7378 err = hot_remove_disk(mddev, new_decode_dev(arg)); 7379 goto unlock; 7380 7381 case ADD_NEW_DISK: 7382 /* We can support ADD_NEW_DISK on read-only arrays 7383 * only if we are re-adding a preexisting device. 7384 * So require mddev->pers and MD_DISK_SYNC. 7385 */ 7386 if (mddev->pers) { 7387 mdu_disk_info_t info; 7388 if (copy_from_user(&info, argp, sizeof(info))) 7389 err = -EFAULT; 7390 else if (!(info.state & (1<<MD_DISK_SYNC))) 7391 /* Need to clear read-only for this */ 7392 break; 7393 else 7394 err = add_new_disk(mddev, &info); 7395 goto unlock; 7396 } 7397 break; 7398 7399 case BLKROSET: 7400 if (get_user(ro, (int __user *)(arg))) { 7401 err = -EFAULT; 7402 goto unlock; 7403 } 7404 err = -EINVAL; 7405 7406 /* if the bdev is going readonly the value of mddev->ro 7407 * does not matter, no writes are coming 7408 */ 7409 if (ro) 7410 goto unlock; 7411 7412 /* are we are already prepared for writes? 
*/ 7413 if (mddev->ro != 1) 7414 goto unlock; 7415 7416 /* transitioning to readauto need only happen for 7417 * arrays that call md_write_start 7418 */ 7419 if (mddev->pers) { 7420 err = restart_array(mddev); 7421 if (err == 0) { 7422 mddev->ro = 2; 7423 set_disk_ro(mddev->gendisk, 0); 7424 } 7425 } 7426 goto unlock; 7427 } 7428 7429 /* 7430 * The remaining ioctls are changing the state of the 7431 * superblock, so we do not allow them on read-only arrays. 7432 */ 7433 if (mddev->ro && mddev->pers) { 7434 if (mddev->ro == 2) { 7435 mddev->ro = 0; 7436 sysfs_notify_dirent_safe(mddev->sysfs_state); 7437 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7438 /* mddev_unlock will wake thread */ 7439 /* If a device failed while we were read-only, we 7440 * need to make sure the metadata is updated now. 7441 */ 7442 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 7443 mddev_unlock(mddev); 7444 wait_event(mddev->sb_wait, 7445 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 7446 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 7447 mddev_lock_nointr(mddev); 7448 } 7449 } else { 7450 err = -EROFS; 7451 goto unlock; 7452 } 7453 } 7454 7455 switch (cmd) { 7456 case ADD_NEW_DISK: 7457 { 7458 mdu_disk_info_t info; 7459 if (copy_from_user(&info, argp, sizeof(info))) 7460 err = -EFAULT; 7461 else 7462 err = add_new_disk(mddev, &info); 7463 goto unlock; 7464 } 7465 7466 case CLUSTERED_DISK_NACK: 7467 if (mddev_is_clustered(mddev)) 7468 md_cluster_ops->new_disk_ack(mddev, false); 7469 else 7470 err = -EINVAL; 7471 goto unlock; 7472 7473 case HOT_ADD_DISK: 7474 err = hot_add_disk(mddev, new_decode_dev(arg)); 7475 goto unlock; 7476 7477 case RUN_ARRAY: 7478 err = do_md_run(mddev); 7479 goto unlock; 7480 7481 case SET_BITMAP_FILE: 7482 err = set_bitmap_file(mddev, (int)arg); 7483 goto unlock; 7484 7485 default: 7486 err = -EINVAL; 7487 goto unlock; 7488 } 7489 7490 unlock: 7491 if (mddev->hold_active == UNTIL_IOCTL && 7492 err != -EINVAL) 7493 mddev->hold_active = 0; 7494 mddev_unlock(mddev); 7495 out: 7496 if(did_set_md_closing) 7497 clear_bit(MD_CLOSING, &mddev->flags); 7498 return err; 7499 } 7500 #ifdef CONFIG_COMPAT 7501 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 7502 unsigned int cmd, unsigned long arg) 7503 { 7504 switch (cmd) { 7505 case HOT_REMOVE_DISK: 7506 case HOT_ADD_DISK: 7507 case SET_DISK_FAULTY: 7508 case SET_BITMAP_FILE: 7509 /* These take in integer arg, do not convert */ 7510 break; 7511 default: 7512 arg = (unsigned long)compat_ptr(arg); 7513 break; 7514 } 7515 7516 return md_ioctl(bdev, mode, cmd, arg); 7517 } 7518 #endif /* CONFIG_COMPAT */ 7519 7520 static int md_open(struct block_device *bdev, fmode_t mode) 7521 { 7522 /* 7523 * Succeed if we can lock the mddev, which confirms that 7524 * it isn't being stopped right now. 7525 */ 7526 struct mddev *mddev = mddev_find(bdev->bd_dev); 7527 int err; 7528 7529 if (!mddev) 7530 return -ENODEV; 7531 7532 if (mddev->gendisk != bdev->bd_disk) { 7533 /* we are racing with mddev_put which is discarding this 7534 * bd_disk. 
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_workqueue(md_misc_wq);
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
		goto out;

	if (test_bit(MD_CLOSING, &mddev->flags)) {
		mutex_unlock(&mddev->open_mutex);
		err = -ENODEV;
		goto out;
	}

	err = 0;
	atomic_inc(&mddev->openers);
	mutex_unlock(&mddev->open_mutex);

	check_disk_change(bdev);
out:
	if (err)
		mddev_put(mddev);
	return err;
}

static void md_release(struct gendisk *disk, fmode_t mode)
{
	struct mddev *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);
}

static int md_media_changed(struct gendisk *disk)
{
	struct mddev *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	struct mddev *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}
static const struct block_device_operations md_fops =
{
	.owner = THIS_MODULE,
	.open = md_open,
	.release = md_release,
	.ioctl = md_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = md_compat_ioctl,
#endif
	.getgeo = md_getgeo,
	.media_changed = md_media_changed,
	.revalidate_disk = md_revalidate,
};

static int md_thread(void *arg)
{
	struct md_thread *thread = arg;

	/*
	 * md_thread is a 'system-thread'; its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
7622 * That means we need to be sure no signals are 7623 * pending 7624 */ 7625 if (signal_pending(current)) 7626 flush_signals(current); 7627 7628 wait_event_interruptible_timeout 7629 (thread->wqueue, 7630 test_bit(THREAD_WAKEUP, &thread->flags) 7631 || kthread_should_stop() || kthread_should_park(), 7632 thread->timeout); 7633 7634 clear_bit(THREAD_WAKEUP, &thread->flags); 7635 if (kthread_should_park()) 7636 kthread_parkme(); 7637 if (!kthread_should_stop()) 7638 thread->run(thread); 7639 } 7640 7641 return 0; 7642 } 7643 7644 void md_wakeup_thread(struct md_thread *thread) 7645 { 7646 if (thread) { 7647 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7648 set_bit(THREAD_WAKEUP, &thread->flags); 7649 wake_up(&thread->wqueue); 7650 } 7651 } 7652 EXPORT_SYMBOL(md_wakeup_thread); 7653 7654 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7655 struct mddev *mddev, const char *name) 7656 { 7657 struct md_thread *thread; 7658 7659 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7660 if (!thread) 7661 return NULL; 7662 7663 init_waitqueue_head(&thread->wqueue); 7664 7665 thread->run = run; 7666 thread->mddev = mddev; 7667 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7668 thread->tsk = kthread_run(md_thread, thread, 7669 "%s_%s", 7670 mdname(thread->mddev), 7671 name); 7672 if (IS_ERR(thread->tsk)) { 7673 kfree(thread); 7674 return NULL; 7675 } 7676 return thread; 7677 } 7678 EXPORT_SYMBOL(md_register_thread); 7679 7680 void md_unregister_thread(struct md_thread **threadp) 7681 { 7682 struct md_thread *thread = *threadp; 7683 if (!thread) 7684 return; 7685 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7686 /* Locking ensures that mddev_unlock does not wake_up a 7687 * non-existent thread 7688 */ 7689 spin_lock(&pers_lock); 7690 *threadp = NULL; 7691 spin_unlock(&pers_lock); 7692 7693 kthread_stop(thread->tsk); 7694 kfree(thread); 7695 } 7696 EXPORT_SYMBOL(md_unregister_thread); 7697 7698 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7699 { 7700 if (!rdev || test_bit(Faulty, &rdev->flags)) 7701 return; 7702 7703 if (!mddev->pers || !mddev->pers->error_handler) 7704 return; 7705 mddev->pers->error_handler(mddev,rdev); 7706 if (mddev->degraded) 7707 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7708 sysfs_notify_dirent_safe(rdev->sysfs_state); 7709 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7710 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7711 md_wakeup_thread(mddev->thread); 7712 if (mddev->event_work.func) 7713 queue_work(md_misc_wq, &mddev->event_work); 7714 md_new_event(mddev); 7715 } 7716 EXPORT_SYMBOL(md_error); 7717 7718 /* seq_file implementation /proc/mdstat */ 7719 7720 static void status_unused(struct seq_file *seq) 7721 { 7722 int i = 0; 7723 struct md_rdev *rdev; 7724 7725 seq_printf(seq, "unused devices: "); 7726 7727 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7728 char b[BDEVNAME_SIZE]; 7729 i++; 7730 seq_printf(seq, "%s ", 7731 bdevname(rdev->bdev,b)); 7732 } 7733 if (!i) 7734 seq_printf(seq, "<none>"); 7735 7736 seq_printf(seq, "\n"); 7737 } 7738 7739 static int status_resync(struct seq_file *seq, struct mddev *mddev) 7740 { 7741 sector_t max_sectors, resync, res; 7742 unsigned long dt, db = 0; 7743 sector_t rt, curr_mark_cnt, resync_mark_cnt; 7744 int scale, recovery_active; 7745 unsigned int per_milli; 7746 7747 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 7748 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7749 max_sectors = mddev->resync_max_sectors; 7750 else 7751 
max_sectors = mddev->dev_sectors; 7752 7753 resync = mddev->curr_resync; 7754 if (resync <= 3) { 7755 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7756 /* Still cleaning up */ 7757 resync = max_sectors; 7758 } else if (resync > max_sectors) 7759 resync = max_sectors; 7760 else 7761 resync -= atomic_read(&mddev->recovery_active); 7762 7763 if (resync == 0) { 7764 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 7765 struct md_rdev *rdev; 7766 7767 rdev_for_each(rdev, mddev) 7768 if (rdev->raid_disk >= 0 && 7769 !test_bit(Faulty, &rdev->flags) && 7770 rdev->recovery_offset != MaxSector && 7771 rdev->recovery_offset) { 7772 seq_printf(seq, "\trecover=REMOTE"); 7773 return 1; 7774 } 7775 if (mddev->reshape_position != MaxSector) 7776 seq_printf(seq, "\treshape=REMOTE"); 7777 else 7778 seq_printf(seq, "\tresync=REMOTE"); 7779 return 1; 7780 } 7781 if (mddev->recovery_cp < MaxSector) { 7782 seq_printf(seq, "\tresync=PENDING"); 7783 return 1; 7784 } 7785 return 0; 7786 } 7787 if (resync < 3) { 7788 seq_printf(seq, "\tresync=DELAYED"); 7789 return 1; 7790 } 7791 7792 WARN_ON(max_sectors == 0); 7793 /* Pick 'scale' such that (resync>>scale)*1000 will fit 7794 * in a sector_t, and (max_sectors>>scale) will fit in a 7795 * u32, as those are the requirements for sector_div. 7796 * Thus 'scale' must be at least 10 7797 */ 7798 scale = 10; 7799 if (sizeof(sector_t) > sizeof(unsigned long)) { 7800 while ( max_sectors/2 > (1ULL<<(scale+32))) 7801 scale++; 7802 } 7803 res = (resync>>scale)*1000; 7804 sector_div(res, (u32)((max_sectors>>scale)+1)); 7805 7806 per_milli = res; 7807 { 7808 int i, x = per_milli/50, y = 20-x; 7809 seq_printf(seq, "["); 7810 for (i = 0; i < x; i++) 7811 seq_printf(seq, "="); 7812 seq_printf(seq, ">"); 7813 for (i = 0; i < y; i++) 7814 seq_printf(seq, "."); 7815 seq_printf(seq, "] "); 7816 } 7817 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 7818 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 7819 "reshape" : 7820 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 7821 "check" : 7822 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 7823 "resync" : "recovery"))), 7824 per_milli/10, per_milli % 10, 7825 (unsigned long long) resync/2, 7826 (unsigned long long) max_sectors/2); 7827 7828 /* 7829 * dt: time from mark until now 7830 * db: blocks written from mark until now 7831 * rt: remaining time 7832 * 7833 * rt is a sector_t, which is always 64bit now. We are keeping 7834 * the original algorithm, but it is not really necessary. 7835 * 7836 * Original algorithm: 7837 * So we divide before multiply in case it is 32bit and close 7838 * to the limit. 7839 * We scale the divisor (db) by 32 to avoid losing precision 7840 * near the end of resync when the number of remaining sectors 7841 * is close to 'db'. 7842 * We then divide rt by 32 after multiplying by db to compensate. 7843 * The '+1' avoids division by zero if db is very small. 
7844 */ 7845 dt = ((jiffies - mddev->resync_mark) / HZ); 7846 if (!dt) dt++; 7847 7848 curr_mark_cnt = mddev->curr_mark_cnt; 7849 recovery_active = atomic_read(&mddev->recovery_active); 7850 resync_mark_cnt = mddev->resync_mark_cnt; 7851 7852 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) 7853 db = curr_mark_cnt - (recovery_active + resync_mark_cnt); 7854 7855 rt = max_sectors - resync; /* number of remaining sectors */ 7856 rt = div64_u64(rt, db/32+1); 7857 rt *= dt; 7858 rt >>= 5; 7859 7860 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 7861 ((unsigned long)rt % 60)/6); 7862 7863 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 7864 return 1; 7865 } 7866 7867 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 7868 { 7869 struct list_head *tmp; 7870 loff_t l = *pos; 7871 struct mddev *mddev; 7872 7873 if (l >= 0x10000) 7874 return NULL; 7875 if (!l--) 7876 /* header */ 7877 return (void*)1; 7878 7879 spin_lock(&all_mddevs_lock); 7880 list_for_each(tmp,&all_mddevs) 7881 if (!l--) { 7882 mddev = list_entry(tmp, struct mddev, all_mddevs); 7883 mddev_get(mddev); 7884 spin_unlock(&all_mddevs_lock); 7885 return mddev; 7886 } 7887 spin_unlock(&all_mddevs_lock); 7888 if (!l--) 7889 return (void*)2;/* tail */ 7890 return NULL; 7891 } 7892 7893 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 7894 { 7895 struct list_head *tmp; 7896 struct mddev *next_mddev, *mddev = v; 7897 7898 ++*pos; 7899 if (v == (void*)2) 7900 return NULL; 7901 7902 spin_lock(&all_mddevs_lock); 7903 if (v == (void*)1) 7904 tmp = all_mddevs.next; 7905 else 7906 tmp = mddev->all_mddevs.next; 7907 if (tmp != &all_mddevs) 7908 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 7909 else { 7910 next_mddev = (void*)2; 7911 *pos = 0x10000; 7912 } 7913 spin_unlock(&all_mddevs_lock); 7914 7915 if (v != (void*)1) 7916 mddev_put(mddev); 7917 return next_mddev; 7918 7919 } 7920 7921 static void md_seq_stop(struct seq_file *seq, void *v) 7922 { 7923 struct mddev *mddev = v; 7924 7925 if (mddev && v != (void*)1 && v != (void*)2) 7926 mddev_put(mddev); 7927 } 7928 7929 static int md_seq_show(struct seq_file *seq, void *v) 7930 { 7931 struct mddev *mddev = v; 7932 sector_t sectors; 7933 struct md_rdev *rdev; 7934 7935 if (v == (void*)1) { 7936 struct md_personality *pers; 7937 seq_printf(seq, "Personalities : "); 7938 spin_lock(&pers_lock); 7939 list_for_each_entry(pers, &pers_list, list) 7940 seq_printf(seq, "[%s] ", pers->name); 7941 7942 spin_unlock(&pers_lock); 7943 seq_printf(seq, "\n"); 7944 seq->poll_event = atomic_read(&md_event_count); 7945 return 0; 7946 } 7947 if (v == (void*)2) { 7948 status_unused(seq); 7949 return 0; 7950 } 7951 7952 spin_lock(&mddev->lock); 7953 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 7954 seq_printf(seq, "%s : %sactive", mdname(mddev), 7955 mddev->pers ? 
"" : "in"); 7956 if (mddev->pers) { 7957 if (mddev->ro==1) 7958 seq_printf(seq, " (read-only)"); 7959 if (mddev->ro==2) 7960 seq_printf(seq, " (auto-read-only)"); 7961 seq_printf(seq, " %s", mddev->pers->name); 7962 } 7963 7964 sectors = 0; 7965 rcu_read_lock(); 7966 rdev_for_each_rcu(rdev, mddev) { 7967 char b[BDEVNAME_SIZE]; 7968 seq_printf(seq, " %s[%d]", 7969 bdevname(rdev->bdev,b), rdev->desc_nr); 7970 if (test_bit(WriteMostly, &rdev->flags)) 7971 seq_printf(seq, "(W)"); 7972 if (test_bit(Journal, &rdev->flags)) 7973 seq_printf(seq, "(J)"); 7974 if (test_bit(Faulty, &rdev->flags)) { 7975 seq_printf(seq, "(F)"); 7976 continue; 7977 } 7978 if (rdev->raid_disk < 0) 7979 seq_printf(seq, "(S)"); /* spare */ 7980 if (test_bit(Replacement, &rdev->flags)) 7981 seq_printf(seq, "(R)"); 7982 sectors += rdev->sectors; 7983 } 7984 rcu_read_unlock(); 7985 7986 if (!list_empty(&mddev->disks)) { 7987 if (mddev->pers) 7988 seq_printf(seq, "\n %llu blocks", 7989 (unsigned long long) 7990 mddev->array_sectors / 2); 7991 else 7992 seq_printf(seq, "\n %llu blocks", 7993 (unsigned long long)sectors / 2); 7994 } 7995 if (mddev->persistent) { 7996 if (mddev->major_version != 0 || 7997 mddev->minor_version != 90) { 7998 seq_printf(seq," super %d.%d", 7999 mddev->major_version, 8000 mddev->minor_version); 8001 } 8002 } else if (mddev->external) 8003 seq_printf(seq, " super external:%s", 8004 mddev->metadata_type); 8005 else 8006 seq_printf(seq, " super non-persistent"); 8007 8008 if (mddev->pers) { 8009 mddev->pers->status(seq, mddev); 8010 seq_printf(seq, "\n "); 8011 if (mddev->pers->sync_request) { 8012 if (status_resync(seq, mddev)) 8013 seq_printf(seq, "\n "); 8014 } 8015 } else 8016 seq_printf(seq, "\n "); 8017 8018 md_bitmap_status(seq, mddev->bitmap); 8019 8020 seq_printf(seq, "\n"); 8021 } 8022 spin_unlock(&mddev->lock); 8023 8024 return 0; 8025 } 8026 8027 static const struct seq_operations md_seq_ops = { 8028 .start = md_seq_start, 8029 .next = md_seq_next, 8030 .stop = md_seq_stop, 8031 .show = md_seq_show, 8032 }; 8033 8034 static int md_seq_open(struct inode *inode, struct file *file) 8035 { 8036 struct seq_file *seq; 8037 int error; 8038 8039 error = seq_open(file, &md_seq_ops); 8040 if (error) 8041 return error; 8042 8043 seq = file->private_data; 8044 seq->poll_event = atomic_read(&md_event_count); 8045 return error; 8046 } 8047 8048 static int md_unloading; 8049 static __poll_t mdstat_poll(struct file *filp, poll_table *wait) 8050 { 8051 struct seq_file *seq = filp->private_data; 8052 __poll_t mask; 8053 8054 if (md_unloading) 8055 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 8056 poll_wait(filp, &md_event_waiters, wait); 8057 8058 /* always allow read */ 8059 mask = EPOLLIN | EPOLLRDNORM; 8060 8061 if (seq->poll_event != atomic_read(&md_event_count)) 8062 mask |= EPOLLERR | EPOLLPRI; 8063 return mask; 8064 } 8065 8066 static const struct file_operations md_seq_fops = { 8067 .owner = THIS_MODULE, 8068 .open = md_seq_open, 8069 .read = seq_read, 8070 .llseek = seq_lseek, 8071 .release = seq_release, 8072 .poll = mdstat_poll, 8073 }; 8074 8075 int register_md_personality(struct md_personality *p) 8076 { 8077 pr_debug("md: %s personality registered for level %d\n", 8078 p->name, p->level); 8079 spin_lock(&pers_lock); 8080 list_add_tail(&p->list, &pers_list); 8081 spin_unlock(&pers_lock); 8082 return 0; 8083 } 8084 EXPORT_SYMBOL(register_md_personality); 8085 8086 int unregister_md_personality(struct md_personality *p) 8087 { 8088 pr_debug("md: %s personality unregistered\n", p->name); 8089 
spin_lock(&pers_lock); 8090 list_del_init(&p->list); 8091 spin_unlock(&pers_lock); 8092 return 0; 8093 } 8094 EXPORT_SYMBOL(unregister_md_personality); 8095 8096 int register_md_cluster_operations(struct md_cluster_operations *ops, 8097 struct module *module) 8098 { 8099 int ret = 0; 8100 spin_lock(&pers_lock); 8101 if (md_cluster_ops != NULL) 8102 ret = -EALREADY; 8103 else { 8104 md_cluster_ops = ops; 8105 md_cluster_mod = module; 8106 } 8107 spin_unlock(&pers_lock); 8108 return ret; 8109 } 8110 EXPORT_SYMBOL(register_md_cluster_operations); 8111 8112 int unregister_md_cluster_operations(void) 8113 { 8114 spin_lock(&pers_lock); 8115 md_cluster_ops = NULL; 8116 spin_unlock(&pers_lock); 8117 return 0; 8118 } 8119 EXPORT_SYMBOL(unregister_md_cluster_operations); 8120 8121 int md_setup_cluster(struct mddev *mddev, int nodes) 8122 { 8123 if (!md_cluster_ops) 8124 request_module("md-cluster"); 8125 spin_lock(&pers_lock); 8126 /* ensure module won't be unloaded */ 8127 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 8128 pr_warn("can't find md-cluster module or get its reference.\n"); 8129 spin_unlock(&pers_lock); 8130 return -ENOENT; 8131 } 8132 spin_unlock(&pers_lock); 8133 8134 return md_cluster_ops->join(mddev, nodes); 8135 } 8136 8137 void md_cluster_stop(struct mddev *mddev) 8138 { 8139 if (!md_cluster_ops) 8140 return; 8141 md_cluster_ops->leave(mddev); 8142 module_put(md_cluster_mod); 8143 } 8144 8145 static int is_mddev_idle(struct mddev *mddev, int init) 8146 { 8147 struct md_rdev *rdev; 8148 int idle; 8149 int curr_events; 8150 8151 idle = 1; 8152 rcu_read_lock(); 8153 rdev_for_each_rcu(rdev, mddev) { 8154 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 8155 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - 8156 atomic_read(&disk->sync_io); 8157 /* sync IO will cause sync_io to increase before the disk_stats 8158 * as sync_io is counted when a request starts, and 8159 * disk_stats is counted when it completes. 8160 * So resync activity will cause curr_events to be smaller than 8161 * when there was no such activity. 8162 * non-sync IO will cause disk_stat to increase without 8163 * increasing sync_io so curr_events will (eventually) 8164 * be larger than it was before. Once it becomes 8165 * substantially larger, the test below will cause 8166 * the array to appear non-idle, and resync will slow 8167 * down. 8168 * If there is a lot of outstanding resync activity when 8169 * we set last_events to curr_events, then all that activity 8170 * completing might cause the array to appear non-idle 8171 * and resync will be slowed down even though there might 8172 * not have been non-resync activity. This will only 8173 * happen once though. 'last_events' will soon reflect 8174 * the state where there are few or no outstanding 8175 * resync requests, and further resync activity will 8176 * always make curr_events less than last_events. 8177 * 8178 */ 8179 if (init || curr_events - rdev->last_events > 64) { 8180 rdev->last_events = curr_events; 8181 idle = 0; 8182 } 8183 } 8184 rcu_read_unlock(); 8185 return idle; 8186 } 8187 8188 void md_done_sync(struct mddev *mddev, int blocks, int ok) 8189 { 8190 /* another "blocks" (512byte) blocks have been synced */ 8191 atomic_sub(blocks, &mddev->recovery_active); 8192 wake_up(&mddev->recovery_wait); 8193 if (!ok) { 8194 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8195 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 8196 md_wakeup_thread(mddev->thread); 8197 // stop recovery, signal do_sync ....
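/*
 * Editor's note: because both MD_RECOVERY_INTR and MD_RECOVERY_ERROR
 * are set here, md_do_sync() checkpoints at ->curr_resync_completed
 * rather than ->curr_resync, so the blocks whose sync failed will be
 * covered again by the next resync attempt.
 */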
8198 } 8199 } 8200 EXPORT_SYMBOL(md_done_sync); 8201 8202 /* md_write_start(mddev, bi) 8203 * If we need to update some array metadata (e.g. 'active' flag 8204 * in superblock) before writing, schedule a superblock update 8205 * and wait for it to complete. 8206 * A return value of 'false' means that the write wasn't recorded 8207 * and cannot proceed as the array is being suspended. 8208 */ 8209 bool md_write_start(struct mddev *mddev, struct bio *bi) 8210 { 8211 int did_change = 0; 8212 8213 if (bio_data_dir(bi) != WRITE) 8214 return true; 8215 8216 BUG_ON(mddev->ro == 1); 8217 if (mddev->ro == 2) { 8218 /* need to switch to read/write */ 8219 mddev->ro = 0; 8220 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8221 md_wakeup_thread(mddev->thread); 8222 md_wakeup_thread(mddev->sync_thread); 8223 did_change = 1; 8224 } 8225 rcu_read_lock(); 8226 percpu_ref_get(&mddev->writes_pending); 8227 smp_mb(); /* Match smp_mb in set_in_sync() */ 8228 if (mddev->safemode == 1) 8229 mddev->safemode = 0; 8230 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ 8231 if (mddev->in_sync || mddev->sync_checkers) { 8232 spin_lock(&mddev->lock); 8233 if (mddev->in_sync) { 8234 mddev->in_sync = 0; 8235 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8236 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8237 md_wakeup_thread(mddev->thread); 8238 did_change = 1; 8239 } 8240 spin_unlock(&mddev->lock); 8241 } 8242 rcu_read_unlock(); 8243 if (did_change) 8244 sysfs_notify_dirent_safe(mddev->sysfs_state); 8245 if (!mddev->has_superblocks) 8246 return true; 8247 wait_event(mddev->sb_wait, 8248 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || 8249 mddev->suspended); 8250 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 8251 percpu_ref_put(&mddev->writes_pending); 8252 return false; 8253 } 8254 return true; 8255 } 8256 EXPORT_SYMBOL(md_write_start); 8257 8258 /* md_write_inc can only be called when md_write_start() has 8259 * already been called at least once for the current request. 8260 * It increments the counter and is useful when a single request 8261 * is split into several parts. Each part causes an increment and 8262 * so needs a matching md_write_end(). 8263 * Unlike md_write_start(), it is safe to call md_write_inc() inside 8264 * a spinlocked region. 8265 */ 8266 void md_write_inc(struct mddev *mddev, struct bio *bi) 8267 { 8268 if (bio_data_dir(bi) != WRITE) 8269 return; 8270 WARN_ON_ONCE(mddev->in_sync || mddev->ro); 8271 percpu_ref_get(&mddev->writes_pending); 8272 } 8273 EXPORT_SYMBOL(md_write_inc); 8274 8275 void md_write_end(struct mddev *mddev) 8276 { 8277 percpu_ref_put(&mddev->writes_pending); 8278 8279 if (mddev->safemode == 2) 8280 md_wakeup_thread(mddev->thread); 8281 else if (mddev->safemode_delay) 8282 /* The roundup() ensures this only performs locking once 8283 * every ->safemode_delay jiffies 8284 */ 8285 mod_timer(&mddev->safemode_timer, 8286 roundup(jiffies, mddev->safemode_delay) + 8287 mddev->safemode_delay); 8288 } 8289 8290 EXPORT_SYMBOL(md_write_end); 8291 8292 /* md_allow_write(mddev) 8293 * Calling this ensures that the array is marked 'active' so that writes 8294 * may proceed without blocking. It is important to call this before 8295 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8296 * Must be called with mddev_lock held.
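 *
 * Typical call pattern (editor's sketch; the caller shown here is
 * hypothetical, not a function from this file):
 *
 *	mddev_lock(mddev);
 *	md_allow_write(mddev);
 *	conf = kzalloc(sizeof(*conf), GFP_KERNEL); // reclaim-triggered writes won't block on a sb update
 *	...
 *	mddev_unlock(mddev);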
8297 */ 8298 void md_allow_write(struct mddev *mddev) 8299 { 8300 if (!mddev->pers) 8301 return; 8302 if (mddev->ro) 8303 return; 8304 if (!mddev->pers->sync_request) 8305 return; 8306 8307 spin_lock(&mddev->lock); 8308 if (mddev->in_sync) { 8309 mddev->in_sync = 0; 8310 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8311 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8312 if (mddev->safemode_delay && 8313 mddev->safemode == 0) 8314 mddev->safemode = 1; 8315 spin_unlock(&mddev->lock); 8316 md_update_sb(mddev, 0); 8317 sysfs_notify_dirent_safe(mddev->sysfs_state); 8318 /* wait for the dirty state to be recorded in the metadata */ 8319 wait_event(mddev->sb_wait, 8320 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 8321 } else 8322 spin_unlock(&mddev->lock); 8323 } 8324 EXPORT_SYMBOL_GPL(md_allow_write); 8325 8326 #define SYNC_MARKS 10 8327 #define SYNC_MARK_STEP (3*HZ) 8328 #define UPDATE_FREQUENCY (5*60*HZ) 8329 void md_do_sync(struct md_thread *thread) 8330 { 8331 struct mddev *mddev = thread->mddev; 8332 struct mddev *mddev2; 8333 unsigned int currspeed = 0, window; 8334 sector_t max_sectors,j, io_sectors, recovery_done; 8335 unsigned long mark[SYNC_MARKS]; 8336 unsigned long update_time; 8337 sector_t mark_cnt[SYNC_MARKS]; 8338 int last_mark,m; 8339 struct list_head *tmp; 8340 sector_t last_check; 8341 int skipped = 0; 8342 struct md_rdev *rdev; 8343 char *desc, *action = NULL; 8344 struct blk_plug plug; 8345 int ret; 8346 8347 /* just incase thread restarts... */ 8348 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8349 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) 8350 return; 8351 if (mddev->ro) {/* never try to sync a read-only array */ 8352 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8353 return; 8354 } 8355 8356 if (mddev_is_clustered(mddev)) { 8357 ret = md_cluster_ops->resync_start(mddev); 8358 if (ret) 8359 goto skip; 8360 8361 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); 8362 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8363 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || 8364 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 8365 && ((unsigned long long)mddev->curr_resync_completed 8366 < (unsigned long long)mddev->resync_max_sectors)) 8367 goto skip; 8368 } 8369 8370 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8371 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 8372 desc = "data-check"; 8373 action = "check"; 8374 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8375 desc = "requested-resync"; 8376 action = "repair"; 8377 } else 8378 desc = "resync"; 8379 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8380 desc = "reshape"; 8381 else 8382 desc = "recovery"; 8383 8384 mddev->last_sync_action = action ?: desc; 8385 8386 /* we overload curr_resync somewhat here. 8387 * 0 == not engaged in resync at all 8388 * 2 == checking that there is no conflict with another sync 8389 * 1 == like 2, but have yielded to allow conflicting resync to 8390 * commence 8391 * other == active in resync - this many blocks 8392 * 8393 * Before starting a resync we must have set curr_resync to 8394 * 2, and then checked that every "conflicting" array has curr_resync 8395 * less than ours. When we find one that is the same or higher 8396 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 8397 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 8398 * This will mean we have to start checking from the beginning again. 
8399 * 8400 */ 8401 8402 do { 8403 int mddev2_minor = -1; 8404 mddev->curr_resync = 2; 8405 8406 try_again: 8407 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8408 goto skip; 8409 for_each_mddev(mddev2, tmp) { 8410 if (mddev2 == mddev) 8411 continue; 8412 if (!mddev->parallel_resync 8413 && mddev2->curr_resync 8414 && match_mddev_units(mddev, mddev2)) { 8415 DEFINE_WAIT(wq); 8416 if (mddev < mddev2 && mddev->curr_resync == 2) { 8417 /* arbitrarily yield */ 8418 mddev->curr_resync = 1; 8419 wake_up(&resync_wait); 8420 } 8421 if (mddev > mddev2 && mddev->curr_resync == 1) 8422 /* no need to wait here, we can wait the next 8423 * time 'round when curr_resync == 2 8424 */ 8425 continue; 8426 /* We need to wait 'interruptible' so as not to 8427 * contribute to the load average, and not to 8428 * be caught by 'softlockup' 8429 */ 8430 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 8431 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8432 mddev2->curr_resync >= mddev->curr_resync) { 8433 if (mddev2_minor != mddev2->md_minor) { 8434 mddev2_minor = mddev2->md_minor; 8435 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n", 8436 desc, mdname(mddev), 8437 mdname(mddev2)); 8438 } 8439 mddev_put(mddev2); 8440 if (signal_pending(current)) 8441 flush_signals(current); 8442 schedule(); 8443 finish_wait(&resync_wait, &wq); 8444 goto try_again; 8445 } 8446 finish_wait(&resync_wait, &wq); 8447 } 8448 } 8449 } while (mddev->curr_resync < 2); 8450 8451 j = 0; 8452 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8453 /* resync follows the size requested by the personality, 8454 * which defaults to physical size, but can be virtual size 8455 */ 8456 max_sectors = mddev->resync_max_sectors; 8457 atomic64_set(&mddev->resync_mismatches, 0); 8458 /* we don't use the checkpoint if there's a bitmap */ 8459 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8460 j = mddev->resync_min; 8461 else if (!mddev->bitmap) 8462 j = mddev->recovery_cp; 8463 8464 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 8465 max_sectors = mddev->resync_max_sectors; 8466 /* 8467 * If the original node aborts reshaping then we continue the 8468 * reshaping, so set j again to avoid restart reshape from the 8469 * first beginning 8470 */ 8471 if (mddev_is_clustered(mddev) && 8472 mddev->reshape_position != MaxSector) 8473 j = mddev->reshape_position; 8474 } else { 8475 /* recovery follows the physical size of devices */ 8476 max_sectors = mddev->dev_sectors; 8477 j = MaxSector; 8478 rcu_read_lock(); 8479 rdev_for_each_rcu(rdev, mddev) 8480 if (rdev->raid_disk >= 0 && 8481 !test_bit(Journal, &rdev->flags) && 8482 !test_bit(Faulty, &rdev->flags) && 8483 !test_bit(In_sync, &rdev->flags) && 8484 rdev->recovery_offset < j) 8485 j = rdev->recovery_offset; 8486 rcu_read_unlock(); 8487 8488 /* If there is a bitmap, we need to make sure all 8489 * writes that started before we added a spare 8490 * complete before we start doing a recovery. 8491 * Otherwise the write might complete and (via 8492 * bitmap_endwrite) set a bit in the bitmap after the 8493 * recovery has checked that bit and skipped that 8494 * region. 
8495 */ 8496 if (mddev->bitmap) { 8497 mddev->pers->quiesce(mddev, 1); 8498 mddev->pers->quiesce(mddev, 0); 8499 } 8500 } 8501 8502 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); 8503 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); 8504 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", 8505 speed_max(mddev), desc); 8506 8507 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 8508 8509 io_sectors = 0; 8510 for (m = 0; m < SYNC_MARKS; m++) { 8511 mark[m] = jiffies; 8512 mark_cnt[m] = io_sectors; 8513 } 8514 last_mark = 0; 8515 mddev->resync_mark = mark[last_mark]; 8516 mddev->resync_mark_cnt = mark_cnt[last_mark]; 8517 8518 /* 8519 * Tune reconstruction: 8520 */ 8521 window = 32 * (PAGE_SIZE / 512); 8522 pr_debug("md: using %dk window, over a total of %lluk.\n", 8523 window/2, (unsigned long long)max_sectors/2); 8524 8525 atomic_set(&mddev->recovery_active, 0); 8526 last_check = 0; 8527 8528 if (j>2) { 8529 pr_debug("md: resuming %s of %s from checkpoint.\n", 8530 desc, mdname(mddev)); 8531 mddev->curr_resync = j; 8532 } else 8533 mddev->curr_resync = 3; /* no longer delayed */ 8534 mddev->curr_resync_completed = j; 8535 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8536 md_new_event(mddev); 8537 update_time = jiffies; 8538 8539 blk_start_plug(&plug); 8540 while (j < max_sectors) { 8541 sector_t sectors; 8542 8543 skipped = 0; 8544 8545 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8546 ((mddev->curr_resync > mddev->curr_resync_completed && 8547 (mddev->curr_resync - mddev->curr_resync_completed) 8548 > (max_sectors >> 4)) || 8549 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 8550 (j - mddev->curr_resync_completed)*2 8551 >= mddev->resync_max - mddev->curr_resync_completed || 8552 mddev->curr_resync_completed > mddev->resync_max 8553 )) { 8554 /* time to update curr_resync_completed */ 8555 wait_event(mddev->recovery_wait, 8556 atomic_read(&mddev->recovery_active) == 0); 8557 mddev->curr_resync_completed = j; 8558 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 8559 j > mddev->recovery_cp) 8560 mddev->recovery_cp = j; 8561 update_time = jiffies; 8562 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8563 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8564 } 8565 8566 while (j >= mddev->resync_max && 8567 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8568 /* As this condition is controlled by user-space, 8569 * we can block indefinitely, so use '_interruptible' 8570 * to avoid triggering warnings. 8571 */ 8572 flush_signals(current); /* just in case */ 8573 wait_event_interruptible(mddev->recovery_wait, 8574 mddev->resync_max > j 8575 || test_bit(MD_RECOVERY_INTR, 8576 &mddev->recovery)); 8577 } 8578 8579 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8580 break; 8581 8582 sectors = mddev->pers->sync_request(mddev, j, &skipped); 8583 if (sectors == 0) { 8584 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8585 break; 8586 } 8587 8588 if (!skipped) { /* actual IO requested */ 8589 io_sectors += sectors; 8590 atomic_add(sectors, &mddev->recovery_active); 8591 } 8592 8593 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8594 break; 8595 8596 j += sectors; 8597 if (j > max_sectors) 8598 /* when skipping, extra large numbers can be returned. 
*/ 8599 j = max_sectors; 8600 if (j > 2) 8601 mddev->curr_resync = j; 8602 mddev->curr_mark_cnt = io_sectors; 8603 if (last_check == 0) 8604 /* this is the earliest that rebuild will be 8605 * visible in /proc/mdstat 8606 */ 8607 md_new_event(mddev); 8608 8609 if (last_check + window > io_sectors || j == max_sectors) 8610 continue; 8611 8612 last_check = io_sectors; 8613 repeat: 8614 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 8615 /* step marks */ 8616 int next = (last_mark+1) % SYNC_MARKS; 8617 8618 mddev->resync_mark = mark[next]; 8619 mddev->resync_mark_cnt = mark_cnt[next]; 8620 mark[next] = jiffies; 8621 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 8622 last_mark = next; 8623 } 8624 8625 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8626 break; 8627 8628 /* 8629 * this loop exits only when we are either slower than 8630 * the 'hard' speed limit, or the system was IO-idle for 8631 * a jiffy. 8632 * the system might be non-idle CPU-wise, but we only care 8633 * about not overloading the IO subsystem. (things like an 8634 * e2fsck being done on the RAID array should execute fast) 8635 */ 8636 cond_resched(); 8637 8638 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 8639 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 8640 /((jiffies-mddev->resync_mark)/HZ +1) +1; 8641 8642 if (currspeed > speed_min(mddev)) { 8643 if (currspeed > speed_max(mddev)) { 8644 msleep(500); 8645 goto repeat; 8646 } 8647 if (!is_mddev_idle(mddev, 0)) { 8648 /* 8649 * Give other IO more of a chance. 8650 * The faster the devices, the less we wait. 8651 */ 8652 wait_event(mddev->recovery_wait, 8653 !atomic_read(&mddev->recovery_active)); 8654 } 8655 } 8656 } 8657 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, 8658 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 8659 ?
"interrupted" : "done"); 8660 /* 8661 * this also signals 'finished resyncing' to md_stop 8662 */ 8663 blk_finish_plug(&plug); 8664 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 8665 8666 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8667 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8668 mddev->curr_resync > 3) { 8669 mddev->curr_resync_completed = mddev->curr_resync; 8670 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8671 } 8672 mddev->pers->sync_request(mddev, max_sectors, &skipped); 8673 8674 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 8675 mddev->curr_resync > 3) { 8676 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8677 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8678 if (mddev->curr_resync >= mddev->recovery_cp) { 8679 pr_debug("md: checkpointing %s of %s.\n", 8680 desc, mdname(mddev)); 8681 if (test_bit(MD_RECOVERY_ERROR, 8682 &mddev->recovery)) 8683 mddev->recovery_cp = 8684 mddev->curr_resync_completed; 8685 else 8686 mddev->recovery_cp = 8687 mddev->curr_resync; 8688 } 8689 } else 8690 mddev->recovery_cp = MaxSector; 8691 } else { 8692 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8693 mddev->curr_resync = MaxSector; 8694 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8695 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 8696 rcu_read_lock(); 8697 rdev_for_each_rcu(rdev, mddev) 8698 if (rdev->raid_disk >= 0 && 8699 mddev->delta_disks >= 0 && 8700 !test_bit(Journal, &rdev->flags) && 8701 !test_bit(Faulty, &rdev->flags) && 8702 !test_bit(In_sync, &rdev->flags) && 8703 rdev->recovery_offset < mddev->curr_resync) 8704 rdev->recovery_offset = mddev->curr_resync; 8705 rcu_read_unlock(); 8706 } 8707 } 8708 } 8709 skip: 8710 /* set CHANGE_PENDING here since maybe another update is needed, 8711 * so other nodes are informed. It should be harmless for normal 8712 * raid */ 8713 set_mask_bits(&mddev->sb_flags, 0, 8714 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 8715 8716 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8717 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8718 mddev->delta_disks > 0 && 8719 mddev->pers->finish_reshape && 8720 mddev->pers->size && 8721 mddev->queue) { 8722 mddev_lock_nointr(mddev); 8723 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 8724 mddev_unlock(mddev); 8725 if (!mddev_is_clustered(mddev)) { 8726 set_capacity(mddev->gendisk, mddev->array_sectors); 8727 revalidate_disk(mddev->gendisk); 8728 } 8729 } 8730 8731 spin_lock(&mddev->lock); 8732 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8733 /* We completed so min/max setting can be forgotten if used. 
*/ 8734 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8735 mddev->resync_min = 0; 8736 mddev->resync_max = MaxSector; 8737 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8738 mddev->resync_min = mddev->curr_resync_completed; 8739 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 8740 mddev->curr_resync = 0; 8741 spin_unlock(&mddev->lock); 8742 8743 wake_up(&resync_wait); 8744 md_wakeup_thread(mddev->thread); 8745 return; 8746 } 8747 EXPORT_SYMBOL_GPL(md_do_sync); 8748 8749 static int remove_and_add_spares(struct mddev *mddev, 8750 struct md_rdev *this) 8751 { 8752 struct md_rdev *rdev; 8753 int spares = 0; 8754 int removed = 0; 8755 bool remove_some = false; 8756 8757 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 8758 /* Mustn't remove devices when resync thread is running */ 8759 return 0; 8760 8761 rdev_for_each(rdev, mddev) { 8762 if ((this == NULL || rdev == this) && 8763 rdev->raid_disk >= 0 && 8764 !test_bit(Blocked, &rdev->flags) && 8765 test_bit(Faulty, &rdev->flags) && 8766 atomic_read(&rdev->nr_pending)==0) { 8767 /* Faulty non-Blocked devices with nr_pending == 0 8768 * never get nr_pending incremented, 8769 * never get Faulty cleared, and never get Blocked set. 8770 * So we can synchronize_rcu now rather than once per device 8771 */ 8772 remove_some = true; 8773 set_bit(RemoveSynchronized, &rdev->flags); 8774 } 8775 } 8776 8777 if (remove_some) 8778 synchronize_rcu(); 8779 rdev_for_each(rdev, mddev) { 8780 if ((this == NULL || rdev == this) && 8781 rdev->raid_disk >= 0 && 8782 !test_bit(Blocked, &rdev->flags) && 8783 ((test_bit(RemoveSynchronized, &rdev->flags) || 8784 (!test_bit(In_sync, &rdev->flags) && 8785 !test_bit(Journal, &rdev->flags))) && 8786 atomic_read(&rdev->nr_pending)==0)) { 8787 if (mddev->pers->hot_remove_disk( 8788 mddev, rdev) == 0) { 8789 sysfs_unlink_rdev(mddev, rdev); 8790 rdev->saved_raid_disk = rdev->raid_disk; 8791 rdev->raid_disk = -1; 8792 removed++; 8793 } 8794 } 8795 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags)) 8796 clear_bit(RemoveSynchronized, &rdev->flags); 8797 } 8798 8799 if (removed && mddev->kobj.sd) 8800 sysfs_notify(&mddev->kobj, NULL, "degraded"); 8801 8802 if (this && removed) 8803 goto no_add; 8804 8805 rdev_for_each(rdev, mddev) { 8806 if (this && this != rdev) 8807 continue; 8808 if (test_bit(Candidate, &rdev->flags)) 8809 continue; 8810 if (rdev->raid_disk >= 0 && 8811 !test_bit(In_sync, &rdev->flags) && 8812 !test_bit(Journal, &rdev->flags) && 8813 !test_bit(Faulty, &rdev->flags)) 8814 spares++; 8815 if (rdev->raid_disk >= 0) 8816 continue; 8817 if (test_bit(Faulty, &rdev->flags)) 8818 continue; 8819 if (!test_bit(Journal, &rdev->flags)) { 8820 if (mddev->ro && 8821 ! 
(rdev->saved_raid_disk >= 0 && 8822 !test_bit(Bitmap_sync, &rdev->flags))) 8823 continue; 8824 8825 rdev->recovery_offset = 0; 8826 } 8827 if (mddev->pers-> 8828 hot_add_disk(mddev, rdev) == 0) { 8829 if (sysfs_link_rdev(mddev, rdev)) 8830 /* failure here is OK */; 8831 if (!test_bit(Journal, &rdev->flags)) 8832 spares++; 8833 md_new_event(mddev); 8834 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8835 } 8836 } 8837 no_add: 8838 if (removed) 8839 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8840 return spares; 8841 } 8842 8843 static void md_start_sync(struct work_struct *ws) 8844 { 8845 struct mddev *mddev = container_of(ws, struct mddev, del_work); 8846 8847 mddev->sync_thread = md_register_thread(md_do_sync, 8848 mddev, 8849 "resync"); 8850 if (!mddev->sync_thread) { 8851 pr_warn("%s: could not start resync thread...\n", 8852 mdname(mddev)); 8853 /* leave the spares where they are, it shouldn't hurt */ 8854 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8855 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8856 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8857 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8858 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8859 wake_up(&resync_wait); 8860 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 8861 &mddev->recovery)) 8862 if (mddev->sysfs_action) 8863 sysfs_notify_dirent_safe(mddev->sysfs_action); 8864 } else 8865 md_wakeup_thread(mddev->sync_thread); 8866 sysfs_notify_dirent_safe(mddev->sysfs_action); 8867 md_new_event(mddev); 8868 } 8869 8870 /* 8871 * This routine is regularly called by all per-raid-array threads to 8872 * deal with generic issues like resync and super-block update. 8873 * Raid personalities that don't have a thread (linear/raid0) do not 8874 * need this as they never do any recovery or update the superblock. 8875 * 8876 * It does not do any resync itself, but rather "forks" off other threads 8877 * to do that as needed. 8878 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 8879 * "->recovery" and create a thread at ->sync_thread. 8880 * When the thread finishes it sets MD_RECOVERY_DONE 8881 * and wakes up this thread which will reap the thread and finish up. 8882 * This thread also removes any faulty devices (with nr_pending == 0). 8883 * 8884 * The overall approach is: 8885 * 1/ if the superblock needs updating, update it. 8886 * 2/ If a recovery thread is running, don't do anything else. 8887 * 3/ If recovery has finished, clean up, possibly marking spares active. 8888 * 4/ If there are any faulty devices, remove them. 8889 * 5/ If array is degraded, try to add spare devices 8890 * 6/ If array has spares or is not in-sync, start a resync thread. 8891 */ 8892 void md_check_recovery(struct mddev *mddev) 8893 { 8894 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { 8895 /* Write superblock - thread that called mddev_suspend() 8896 * holds reconfig_mutex for us.
8897 */ 8898 set_bit(MD_UPDATING_SB, &mddev->flags); 8899 smp_mb__after_atomic(); 8900 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) 8901 md_update_sb(mddev, 0); 8902 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); 8903 wake_up(&mddev->sb_wait); 8904 } 8905 8906 if (mddev->suspended) 8907 return; 8908 8909 if (mddev->bitmap) 8910 md_bitmap_daemon_work(mddev); 8911 8912 if (signal_pending(current)) { 8913 if (mddev->pers->sync_request && !mddev->external) { 8914 pr_debug("md: %s in immediate safe mode\n", 8915 mdname(mddev)); 8916 mddev->safemode = 2; 8917 } 8918 flush_signals(current); 8919 } 8920 8921 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 8922 return; 8923 if ( ! ( 8924 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || 8925 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 8926 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8927 (mddev->external == 0 && mddev->safemode == 1) || 8928 (mddev->safemode == 2 8929 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 8930 )) 8931 return; 8932 8933 if (mddev_trylock(mddev)) { 8934 int spares = 0; 8935 8936 if (!mddev->external && mddev->safemode == 1) 8937 mddev->safemode = 0; 8938 8939 if (mddev->ro) { 8940 struct md_rdev *rdev; 8941 if (!mddev->external && mddev->in_sync) 8942 /* 'Blocked' flag not needed as failed devices 8943 * will be recorded if array switched to read/write. 8944 * Leaving it set will prevent the device 8945 * from being removed. 8946 */ 8947 rdev_for_each(rdev, mddev) 8948 clear_bit(Blocked, &rdev->flags); 8949 /* On a read-only array we can: 8950 * - remove failed devices 8951 * - add already-in_sync devices if the array itself 8952 * is in-sync. 8953 * As we only add devices that are already in-sync, 8954 * we can activate the spares immediately. 8955 */ 8956 remove_and_add_spares(mddev, NULL); 8957 /* There is no thread, but we need to call 8958 * ->spare_active and clear saved_raid_disk 8959 */ 8960 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8961 md_reap_sync_thread(mddev); 8962 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8963 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8964 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8965 goto unlock; 8966 } 8967 8968 if (mddev_is_clustered(mddev)) { 8969 struct md_rdev *rdev; 8970 /* kick the device if another node issued a 8971 * remove disk. 8972 */ 8973 rdev_for_each(rdev, mddev) { 8974 if (test_and_clear_bit(ClusterRemove, &rdev->flags) && 8975 rdev->raid_disk < 0) 8976 md_kick_rdev_from_array(rdev); 8977 } 8978 } 8979 8980 if (!mddev->external && !mddev->in_sync) { 8981 spin_lock(&mddev->lock); 8982 set_in_sync(mddev); 8983 spin_unlock(&mddev->lock); 8984 } 8985 8986 if (mddev->sb_flags) 8987 md_update_sb(mddev, 0); 8988 8989 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 8990 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 8991 /* resync/recovery still happening */ 8992 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8993 goto unlock; 8994 } 8995 if (mddev->sync_thread) { 8996 md_reap_sync_thread(mddev); 8997 goto unlock; 8998 } 8999 /* Set RUNNING before clearing NEEDED to avoid 9000 * any transients in the value of "sync_action". 
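 *
 * (Editor's note: the sysfs "sync_action" attribute reports "idle"
 * only when neither MD_RECOVERY_RUNNING nor MD_RECOVERY_NEEDED is
 * set, so clearing NEEDED before setting RUNNING could briefly show
 * a spurious "idle" to a concurrent reader.)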
9001 */ 9002 mddev->curr_resync_completed = 0; 9003 spin_lock(&mddev->lock); 9004 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9005 spin_unlock(&mddev->lock); 9006 /* Clear some bits that don't mean anything, but 9007 * might be left set 9008 */ 9009 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 9010 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9011 9012 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9013 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 9014 goto not_running; 9015 /* no recovery is running. 9016 * remove any failed drives, then 9017 * add spares if possible. 9018 * Spares are also removed and re-added, to allow 9019 * the personality to fail the re-add. 9020 */ 9021 9022 if (mddev->reshape_position != MaxSector) { 9023 if (mddev->pers->check_reshape == NULL || 9024 mddev->pers->check_reshape(mddev) != 0) 9025 /* Cannot proceed */ 9026 goto not_running; 9027 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9028 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9029 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 9030 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9031 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9032 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9033 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9034 } else if (mddev->recovery_cp < MaxSector) { 9035 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9036 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9037 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 9038 /* nothing to be done ... */ 9039 goto not_running; 9040 9041 if (mddev->pers->sync_request) { 9042 if (spares) { 9043 /* We are adding a device or devices to an array 9044 * which has the bitmap stored on all devices. 9045 * So make sure all bitmap pages get written 9046 */ 9047 md_bitmap_write_all(mddev->bitmap); 9048 } 9049 INIT_WORK(&mddev->del_work, md_start_sync); 9050 queue_work(md_misc_wq, &mddev->del_work); 9051 goto unlock; 9052 } 9053 not_running: 9054 if (!mddev->sync_thread) { 9055 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9056 wake_up(&resync_wait); 9057 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 9058 &mddev->recovery)) 9059 if (mddev->sysfs_action) 9060 sysfs_notify_dirent_safe(mddev->sysfs_action); 9061 } 9062 unlock: 9063 wake_up(&mddev->sb_wait); 9064 mddev_unlock(mddev); 9065 } 9066 } 9067 EXPORT_SYMBOL(md_check_recovery); 9068 9069 void md_reap_sync_thread(struct mddev *mddev) 9070 { 9071 struct md_rdev *rdev; 9072 sector_t old_dev_sectors = mddev->dev_sectors; 9073 bool is_reshaped = false; 9074 9075 /* resync has finished, collect result */ 9076 md_unregister_thread(&mddev->sync_thread); 9077 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9078 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 9079 /* success...*/ 9080 /* activate any spares */ 9081 if (mddev->pers->spare_active(mddev)) { 9082 sysfs_notify(&mddev->kobj, NULL, 9083 "degraded"); 9084 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9085 } 9086 } 9087 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9088 mddev->pers->finish_reshape) { 9089 mddev->pers->finish_reshape(mddev); 9090 if (mddev_is_clustered(mddev)) 9091 is_reshaped = true; 9092 } 9093 9094 /* If array is no-longer degraded, then any saved_raid_disk 9095 * information must be scrapped. 
*/ 9097 if (!mddev->degraded) 9098 rdev_for_each(rdev, mddev) 9099 rdev->saved_raid_disk = -1; 9100 9101 md_update_sb(mddev, 1); 9102 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can 9103 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by 9104 * clustered raid */ 9105 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) 9106 md_cluster_ops->resync_finish(mddev); 9107 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9108 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9109 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9110 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9111 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9112 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9113 /* 9114 * We call md_cluster_ops->update_size here because sync_size could 9115 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared, 9116 * so it is time to update the size across the cluster. 9117 */ 9118 if (mddev_is_clustered(mddev) && is_reshaped 9119 && !test_bit(MD_CLOSING, &mddev->flags)) 9120 md_cluster_ops->update_size(mddev, old_dev_sectors); 9121 wake_up(&resync_wait); 9122 /* flag recovery needed just to double check */ 9123 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9124 sysfs_notify_dirent_safe(mddev->sysfs_action); 9125 md_new_event(mddev); 9126 if (mddev->event_work.func) 9127 queue_work(md_misc_wq, &mddev->event_work); 9128 } 9129 EXPORT_SYMBOL(md_reap_sync_thread); 9130 9131 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 9132 { 9133 sysfs_notify_dirent_safe(rdev->sysfs_state); 9134 wait_event_timeout(rdev->blocked_wait, 9135 !test_bit(Blocked, &rdev->flags) && 9136 !test_bit(BlockedBadBlocks, &rdev->flags), 9137 msecs_to_jiffies(5000)); 9138 rdev_dec_pending(rdev, mddev); 9139 } 9140 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 9141 9142 void md_finish_reshape(struct mddev *mddev) 9143 { 9144 /* called by the personality module when reshape completes.
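 *
 * Editor's illustration: if the reshape moved the data start of a
 * device from data_offset 4096 down to new_data_offset 2048, the
 * 2048 sectors freed at the front are added to rdev->sectors below;
 * a reshape that moves data the other way shrinks rdev->sectors
 * instead.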
*/ 9145 struct md_rdev *rdev; 9146 9147 rdev_for_each(rdev, mddev) { 9148 if (rdev->data_offset > rdev->new_data_offset) 9149 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 9150 else 9151 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 9152 rdev->data_offset = rdev->new_data_offset; 9153 } 9154 } 9155 EXPORT_SYMBOL(md_finish_reshape); 9156 9157 /* Bad block management */ 9158 9159 /* Returns 1 on success, 0 on failure */ 9160 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9161 int is_new) 9162 { 9163 struct mddev *mddev = rdev->mddev; 9164 int rv; 9165 if (is_new) 9166 s += rdev->new_data_offset; 9167 else 9168 s += rdev->data_offset; 9169 rv = badblocks_set(&rdev->badblocks, s, sectors, 0); 9170 if (rv == 0) { 9171 /* Make sure they get written out promptly */ 9172 if (test_bit(ExternalBbl, &rdev->flags)) 9173 sysfs_notify(&rdev->kobj, NULL, 9174 "unacknowledged_bad_blocks"); 9175 sysfs_notify_dirent_safe(rdev->sysfs_state); 9176 set_mask_bits(&mddev->sb_flags, 0, 9177 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); 9178 md_wakeup_thread(rdev->mddev->thread); 9179 return 1; 9180 } else 9181 return 0; 9182 } 9183 EXPORT_SYMBOL_GPL(rdev_set_badblocks); 9184 9185 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9186 int is_new) 9187 { 9188 int rv; 9189 if (is_new) 9190 s += rdev->new_data_offset; 9191 else 9192 s += rdev->data_offset; 9193 rv = badblocks_clear(&rdev->badblocks, s, sectors); 9194 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) 9195 sysfs_notify(&rdev->kobj, NULL, "bad_blocks"); 9196 return rv; 9197 } 9198 EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 9199 9200 static int md_notify_reboot(struct notifier_block *this, 9201 unsigned long code, void *x) 9202 { 9203 struct list_head *tmp; 9204 struct mddev *mddev; 9205 int need_delay = 0; 9206 9207 for_each_mddev(mddev, tmp) { 9208 if (mddev_trylock(mddev)) { 9209 if (mddev->pers) 9210 __md_stop_writes(mddev); 9211 if (mddev->persistent) 9212 mddev->safemode = 2; 9213 mddev_unlock(mddev); 9214 } 9215 need_delay = 1; 9216 } 9217 /* 9218 * certain more exotic SCSI devices are known to be 9219 * volatile wrt too early system reboots. While the 9220 * right place to handle this issue is the given 9221 * driver, we do want to have a safe RAID driver ... 
9222 */ 9223 if (need_delay) 9224 mdelay(1000*1); 9225 9226 return NOTIFY_DONE; 9227 } 9228 9229 static struct notifier_block md_notifier = { 9230 .notifier_call = md_notify_reboot, 9231 .next = NULL, 9232 .priority = INT_MAX, /* before any real devices */ 9233 }; 9234 9235 static void md_geninit(void) 9236 { 9237 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 9238 9239 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); 9240 } 9241 9242 static int __init md_init(void) 9243 { 9244 int ret = -ENOMEM; 9245 9246 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); 9247 if (!md_wq) 9248 goto err_wq; 9249 9250 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 9251 if (!md_misc_wq) 9252 goto err_misc_wq; 9253 9254 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0) 9255 goto err_md; 9256 9257 if ((ret = register_blkdev(0, "mdp")) < 0) 9258 goto err_mdp; 9259 mdp_major = ret; 9260 9261 blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE, 9262 md_probe, NULL, NULL); 9263 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, 9264 md_probe, NULL, NULL); 9265 9266 register_reboot_notifier(&md_notifier); 9267 raid_table_header = register_sysctl_table(raid_root_table); 9268 9269 md_geninit(); 9270 return 0; 9271 9272 err_mdp: 9273 unregister_blkdev(MD_MAJOR, "md"); 9274 err_md: 9275 destroy_workqueue(md_misc_wq); 9276 err_misc_wq: 9277 destroy_workqueue(md_wq); 9278 err_wq: 9279 return ret; 9280 } 9281 9282 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) 9283 { 9284 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 9285 struct md_rdev *rdev2; 9286 int role, ret; 9287 char b[BDEVNAME_SIZE]; 9288 9289 /* 9290 * If size is changed in another node then we need to 9291 * do resize as well. 9292 */ 9293 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { 9294 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); 9295 if (ret) 9296 pr_info("md-cluster: resize failed\n"); 9297 else 9298 md_bitmap_update_sb(mddev->bitmap); 9299 } 9300 9301 /* Check for change of roles in the active devices */ 9302 rdev_for_each(rdev2, mddev) { 9303 if (test_bit(Faulty, &rdev2->flags)) 9304 continue; 9305 9306 /* Check if the roles changed */ 9307 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); 9308 9309 if (test_bit(Candidate, &rdev2->flags)) { 9310 if (role == 0xfffe) { 9311 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b)); 9312 md_kick_rdev_from_array(rdev2); 9313 continue; 9314 } 9315 else 9316 clear_bit(Candidate, &rdev2->flags); 9317 } 9318 9319 if (role != rdev2->raid_disk) { 9320 /* 9321 * got activated except reshape is happening. 9322 */ 9323 if (rdev2->raid_disk == -1 && role != 0xffff && 9324 !(le32_to_cpu(sb->feature_map) & 9325 MD_FEATURE_RESHAPE_ACTIVE)) { 9326 rdev2->saved_raid_disk = role; 9327 ret = remove_and_add_spares(mddev, rdev2); 9328 pr_info("Activated spare: %s\n", 9329 bdevname(rdev2->bdev,b)); 9330 /* wakeup mddev->thread here, so array could 9331 * perform resync with the new activated disk */ 9332 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9333 md_wakeup_thread(mddev->thread); 9334 } 9335 /* device faulty 9336 * We just want to do the minimum to mark the disk 9337 * as faulty. The recovery is performed by the 9338 * one who initiated the error. 
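 *
 * (Editor's note: in sb->dev_roles[], 0xffff marks a spare,
 * 0xfffe a faulty device and 0xfffd a journal device, matching
 * MD_DISK_ROLE_SPARE, MD_DISK_ROLE_FAULTY and MD_DISK_ROLE_JOURNAL
 * in md_p.h.)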
*/ 9340 if ((role == 0xfffe) || (role == 0xfffd)) { 9341 md_error(mddev, rdev2); 9342 clear_bit(Blocked, &rdev2->flags); 9343 } 9344 } 9345 } 9346 9347 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) 9348 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); 9349 9350 /* 9351 * Since mddev->delta_disks has already been updated in update_raid_disks, 9352 * it is time to check reshape. 9353 */ 9354 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9355 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9356 /* 9357 * reshape is happening on the remote node, we need to 9358 * update reshape_position and call start_reshape. 9359 */ 9360 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 9361 if (mddev->pers->update_reshape_pos) 9362 mddev->pers->update_reshape_pos(mddev); 9363 if (mddev->pers->start_reshape) 9364 mddev->pers->start_reshape(mddev); 9365 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9366 mddev->reshape_position != MaxSector && 9367 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9368 /* reshape has just finished on another node. */ 9369 mddev->reshape_position = MaxSector; 9370 if (mddev->pers->update_reshape_pos) 9371 mddev->pers->update_reshape_pos(mddev); 9372 } 9373 9374 /* Finally set the event to be up to date */ 9375 mddev->events = le64_to_cpu(sb->events); 9376 } 9377 9378 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) 9379 { 9380 int err; 9381 struct page *swapout = rdev->sb_page; 9382 struct mdp_superblock_1 *sb; 9383 9384 /* Store the sb page of the rdev in the swapout temporary 9385 * variable in case we err in the future 9386 */ 9387 rdev->sb_page = NULL; 9388 err = alloc_disk_sb(rdev); 9389 if (err == 0) { 9390 ClearPageUptodate(rdev->sb_page); 9391 rdev->sb_loaded = 0; 9392 err = super_types[mddev->major_version]. 9393 load_super(rdev, NULL, mddev->minor_version); 9394 } 9395 if (err < 0) { 9396 pr_warn("%s: %d Could not reload rdev(%d) err: %d. 
Restoring old values\n", 9397 __func__, __LINE__, rdev->desc_nr, err); 9398 if (rdev->sb_page) 9399 put_page(rdev->sb_page); 9400 rdev->sb_page = swapout; 9401 rdev->sb_loaded = 1; 9402 return err; 9403 } 9404 9405 sb = page_address(rdev->sb_page); 9406 /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET 9407 * is not set 9408 */ 9409 9410 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)) 9411 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 9412 9413 /* The other node finished recovery, call spare_active to set 9414 * device In_sync and mddev->degraded 9415 */ 9416 if (rdev->recovery_offset == MaxSector && 9417 !test_bit(In_sync, &rdev->flags) && 9418 mddev->pers->spare_active(mddev)) 9419 sysfs_notify(&mddev->kobj, NULL, "degraded"); 9420 9421 put_page(swapout); 9422 return 0; 9423 } 9424 9425 void md_reload_sb(struct mddev *mddev, int nr) 9426 { 9427 struct md_rdev *rdev; 9428 int err; 9429 9430 /* Find the rdev */ 9431 rdev_for_each_rcu(rdev, mddev) { 9432 if (rdev->desc_nr == nr) 9433 break; 9434 } 9435 9436 if (!rdev || rdev->desc_nr != nr) { 9437 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); 9438 return; 9439 } 9440 9441 err = read_rdev(mddev, rdev); 9442 if (err < 0) 9443 return; 9444 9445 check_sb_changes(mddev, rdev); 9446 9447 /* Read all rdev's to update recovery_offset */ 9448 rdev_for_each_rcu(rdev, mddev) { 9449 if (!test_bit(Faulty, &rdev->flags)) 9450 read_rdev(mddev, rdev); 9451 } 9452 } 9453 EXPORT_SYMBOL(md_reload_sb); 9454 9455 #ifndef MODULE 9456 9457 /* 9458 * Searches all registered partitions for autorun RAID arrays 9459 * at boot time. 9460 */ 9461 9462 static DEFINE_MUTEX(detected_devices_mutex); 9463 static LIST_HEAD(all_detected_devices); 9464 struct detected_devices_node { 9465 struct list_head list; 9466 dev_t dev; 9467 }; 9468 9469 void md_autodetect_dev(dev_t dev) 9470 { 9471 struct detected_devices_node *node_detected_dev; 9472 9473 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 9474 if (node_detected_dev) { 9475 node_detected_dev->dev = dev; 9476 mutex_lock(&detected_devices_mutex); 9477 list_add_tail(&node_detected_dev->list, &all_detected_devices); 9478 mutex_unlock(&detected_devices_mutex); 9479 } 9480 } 9481 9482 static void autostart_arrays(int part) 9483 { 9484 struct md_rdev *rdev; 9485 struct detected_devices_node *node_detected_dev; 9486 dev_t dev; 9487 int i_scanned, i_passed; 9488 9489 i_scanned = 0; 9490 i_passed = 0; 9491 9492 pr_info("md: Autodetecting RAID arrays.\n"); 9493 9494 mutex_lock(&detected_devices_mutex); 9495 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 9496 i_scanned++; 9497 node_detected_dev = list_entry(all_detected_devices.next, 9498 struct detected_devices_node, list); 9499 list_del(&node_detected_dev->list); 9500 dev = node_detected_dev->dev; 9501 kfree(node_detected_dev); 9502 mutex_unlock(&detected_devices_mutex); 9503 rdev = md_import_device(dev,0, 90); 9504 mutex_lock(&detected_devices_mutex); 9505 if (IS_ERR(rdev)) 9506 continue; 9507 9508 if (test_bit(Faulty, &rdev->flags)) 9509 continue; 9510 9511 set_bit(AutoDetected, &rdev->flags); 9512 list_add(&rdev->same_set, &pending_raid_disks); 9513 i_passed++; 9514 } 9515 mutex_unlock(&detected_devices_mutex); 9516 9517 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed); 9518 9519 autorun_devices(part); 9520 } 9521 9522 #endif /* !MODULE */ 9523 9524 static __exit void md_exit(void) 9525 { 9526 struct mddev *mddev; 9527 struct list_head 
*tmp; 9528 int delay = 1; 9529 9530 blk_unregister_region(MKDEV(MD_MAJOR,0), 512); 9531 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 9532 9533 unregister_blkdev(MD_MAJOR,"md"); 9534 unregister_blkdev(mdp_major, "mdp"); 9535 unregister_reboot_notifier(&md_notifier); 9536 unregister_sysctl_table(raid_table_header); 9537 9538 /* We cannot unload the modules while some process is 9539 * waiting for us in select() or poll() - wake them up 9540 */ 9541 md_unloading = 1; 9542 while (waitqueue_active(&md_event_waiters)) { 9543 /* not safe to leave yet */ 9544 wake_up(&md_event_waiters); 9545 msleep(delay); 9546 delay += delay; 9547 } 9548 remove_proc_entry("mdstat", NULL); 9549 9550 for_each_mddev(mddev, tmp) { 9551 export_array(mddev); 9552 mddev->ctime = 0; 9553 mddev->hold_active = 0; 9554 /* 9555 * for_each_mddev() will call mddev_put() at the end of each 9556 * iteration. As the mddev is now fully clear, this will 9557 * schedule the mddev for destruction by a workqueue, and the 9558 * destroy_workqueue() below will wait for that to complete. 9559 */ 9560 } 9561 destroy_workqueue(md_misc_wq); 9562 destroy_workqueue(md_wq); 9563 } 9564 9565 subsys_initcall(md_init); 9566 module_exit(md_exit) 9567 9568 static int get_ro(char *buffer, const struct kernel_param *kp) 9569 { 9570 return sprintf(buffer, "%d", start_readonly); 9571 } 9572 static int set_ro(const char *val, const struct kernel_param *kp) 9573 { 9574 return kstrtouint(val, 10, (unsigned int *)&start_readonly); 9575 } 9576 9577 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 9578 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 9579 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 9580 module_param(create_on_open, bool, S_IRUSR|S_IWUSR); 9581 9582 MODULE_LICENSE("GPL"); 9583 MODULE_DESCRIPTION("MD RAID framework"); 9584 MODULE_ALIAS("md"); 9585 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 9586