/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or a resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};
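/*
 * Illustrative only: once the tables above are registered, the limits
 * show up as ordinary sysctls, so an administrator could e.g. run
 *
 *	echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *	echo 500000 > /proc/sys/dev/raid/speed_limit_max
 *
 * Per-array values written to /sys/block/mdX/md/sync_speed_{min,max}
 * override these system-wide defaults via speed_min()/speed_max() above.
 */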
static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it.  This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->bio_set))
		return bio_alloc(gfp_mask, nr_iovecs);

	return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->sync_set))
		return bio_alloc(GFP_NOIO, 1);

	return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
}

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
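/*
 * A minimal usage sketch (illustrative, not a call site in this file):
 * the macro takes and drops all_mddevs_lock and the per-mddev refcount
 * itself, so the body runs unlocked but with a reference held:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_debug("md: visiting %s\n", mdname(mddev));
 *
 * Code that breaks out of the loop early still owns a reference to the
 * current mddev and must drop it with mddev_put().
 */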
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device
 * is being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	const int sgrp = op_stat_group(bio_op(bio));
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;

	blk_queue_split(q, &bio);

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);

	part_stat_lock();
	part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
	part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
	part_stat_unlock();

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
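/*
 * A minimal sketch of the intended suspend/resume pattern (illustrative;
 * the caller must already hold reconfig_mutex, as both helpers assert):
 *
 *	mddev_suspend(mddev);
 *	... reconfigure state that must not race with IO ...
 *	mddev_resume(mddev);
 *
 * The ->suspended count nests, so paired calls from independent callers
 * are safe: only the first suspend quiesces and only the last resume
 * restarts the personality.
 */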
int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we re-acquire rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}
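/*
 * Descriptive summary of the flush path (no new behaviour): a flush bio
 * enters md_flush_request() below, which parks it in mddev->flush_bio and
 * queues submit_flushes(); that sends an empty REQ_PREFLUSH bio to every
 * active rdev, each completion runs md_end_flush(), and when the last one
 * finishes, md_submit_flush_data() either completes an empty barrier or
 * strips REQ_PREFLUSH and pushes the data part through md_handle_request().
 */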
static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to
	 * avoid a deadlock: other bios that have passed the
	 * md_handle_request suspend check could be waiting for this one,
	 * while the md_handle_request call below could be waiting for
	 * those bios because of the suspend check.
	 */
	mddev->last_flush = mddev->start_flush;
	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_after(mddev->last_flush, start),
			    mddev->lock);
	if (!ktime_after(mddev->last_flush, start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
	}
	spin_unlock_irq(&mddev->lock);

	if (!bio) {
		INIT_WORK(&mddev->flush_work, submit_flushes);
		queue_work(md_wq, &mddev->flush_work);
	} else {
		/* flush was performed for some other bio while we waited. */
		if (bio->bi_iter.bi_size == 0)
			/* an empty barrier - all done */
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			mddev->pers->make_request(mddev, bio);
		}
	}
}
EXPORT_SYMBOL(md_flush_request);
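/*
 * Worked example of the optimisation above (illustrative): a flush
 * arrives at time t and finds another flush already pending.  If the
 * pending flush only *started* after t, then on completion we have
 * last_flush = start_flush > t, so ktime_after(last_flush, t) is true
 * and the waiter skips straight to the "flush was performed for some
 * other bio" path.  If it started before t, the waiter must submit its
 * own flush once flush_bio becomes free.
 */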
static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	kobject_init(&mddev->kobj, &md_ktype);
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}
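/*
 * Worked example of the unit rounding in mddev_find() (illustrative):
 * partitionable arrays (major != MD_MAJOR) reserve 1 << MdpMinorShift
 * minors per array, so with MdpMinorShift == 6 a lookup for minor 67
 * (partition 3 of an array) is masked down to minor 64, and every
 * partition of that array resolves to the same mddev.
 */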
static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
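/*
 * Worked example (illustrative): MD_NEW_SIZE_SECTORS() from md_p.h rounds
 * the device size down to a 64 KiB (128-sector) boundary and steps back
 * one more 64 KiB chunk for the superblock, so a 1000000-sector device
 * gives (1000000 & ~127) - 128 = 999936 - 128 = 999808: the 0.90
 * superblock occupies the last complete 64 KiB block of the device.
 */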
static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: super_written gets error=%d\n", bio->bi_status);
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = md_bio_alloc_sync(mddev);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
	int ret;

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(bio, rdev->meta_bdev);
	else
		bio_set_dev(bio, rdev->bdev);
	bio_set_op_attrs(bio, op, op_flags);
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(bio);

	ret = !bio->bi_status;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
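/*
 * Worked example (illustrative): folding 0x12345678 gives
 * 0x5678 + 0x1234 = 0x68ac.  The second pass handles a carry, e.g.
 * 0xffff0001 -> 0x0001 + 0xffff = 0x10000 -> 0x0000 + 0x0001 = 0x0001,
 * so the result always fits in 16 bits.
 */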
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};
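/*
 * Handlers are dispatched through this table by major version; a typical
 * call site (a sketch of what sync_super() later in this file does) is:
 *
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 *
 * super_types[] itself is defined further down, after the 0.90 and 1.x
 * implementations.
 */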
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	pr_warn("%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors);
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		pr_warn("md: invalid raid superblock magic on %s\n", b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		pr_warn("Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version, b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		pr_warn("md: invalid superblock checksum on %s\n", b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!md_uuid_equal(refsb, sb)) {
			pr_warn("md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!md_sb_equal(refsb, sb)) {
			pr_warn("md: %s has same UUID but different superblock to %s\n",
				b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (sector_t)(2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
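/*
 * A note on the 4TB clamp above (illustrative arithmetic): v0.90
 * metadata stores sizes in 32-bit KiB fields, so the largest
 * representable component is 2^32 KiB = 4 TiB = (2ULL << 32) sectors;
 * subtracting 2 keeps rdev->sectors a whole number of 1 KiB blocks.
 */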
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		if (desc->state & (1<<MD_DISK_FAILFAST))
			set_bit(FailFast, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
		if (test_bit(FailFast, &rdev2->flags))
			d->state |= (1<<MD_DISK_FAILFAST);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
		num_sectors = (sector_t)(2ULL << 32) - 2;
	do {
		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	} while (md_super_wait(rdev->mddev) < 0);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;
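	/*
	 * Worked example for minor_version 0 (illustrative): on a device
	 * of 2097153 sectors, sb_start = 2097153 - 16 = 2097137, rounded
	 * down to an 8-sector (4K) boundary gives 2097136, which places
	 * the superblock between 8K and 12K from the end of the device,
	 * as described above.
	 */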
	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		pr_warn("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		pr_warn("md: data_size too small on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, REQ_OP_READ, 0, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (badblocks_set(&rdev->badblocks, sector, count, 1))
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;
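	/*
	 * Worked example of the on-disk bad-block format read above
	 * (illustrative): each 64-bit entry packs a start sector in the
	 * high 54 bits and a length in the low 10 bits, both scaled by
	 * bblog_shift.  With bblog_shift == 0 the entry 0x12403 decodes
	 * to sector = 0x12403 >> 10 = 73 and count = 3, i.e. three bad
	 * sectors starting at sector 73.  An all-ones entry ends the list.
	 */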
	if ((le32_to_cpu(sb->feature_map) &
	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
		rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
		rdev->ppl.size = le16_to_cpu(sb->ppl.size);
		rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
	}

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			pr_warn("md: %s has strangely different superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}

static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime);
		mddev->utime = le64_to_cpu(sb->utime);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
			set_bit(MD_HAS_JOURNAL, &mddev->flags);

		if (le32_to_cpu(sb->feature_map) &
		    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
			if (le32_to_cpu(sb->feature_map) &
			    (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
				return -EINVAL;
			if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
			    (le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_MULTIPLE_PPLS))
				return -EINVAL;
			set_bit(MD_HAS_PPL, &mddev->flags);
		}
	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = MD_DISK_ROLE_SPARE;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case MD_DISK_ROLE_SPARE: /* spare */
			break;
		case MD_DISK_ROLE_FAULTY: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		case MD_DISK_ROLE_JOURNAL: /* journal device */
			if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
				/* journal device without journal feature */
				pr_warn("md: journal device provided without journal feature, ignoring the device\n");
				return -EINVAL;
			}
			set_bit(Journal, &rdev->flags);
			rdev->journal_tail = le64_to_cpu(sb->journal_tail);
			rdev->raid_disk = 0;
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (sb->devflags & FailFast1)
			set_bit(FailFast, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */
	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
		sb->resync_offset = cpu_to_le64(MaxSector);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	if (test_bit(FailFast, &rdev->flags))
		sb->devflags |= FailFast1;
	else
		sb->devflags &= ~FailFast1;

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
	}
	/* Note: recovery_offset and journal_tail share space  */
	if (test_bit(Journal, &rdev->flags))
		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}
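	/*
	 * Worked example of the new_offset encoding above (illustrative):
	 * the field stores a signed delta in an unsigned 32-bit slot, so
	 * moving data 2048 sectors towards the start of the device stores
	 * new_data_offset - data_offset == -2048 as 0xfffff800, and
	 * super_1_load() recovers the sign with its (s32) cast before
	 * adding the delta back onto data_offset.
	 */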
p[i]; 1888 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 1889 | BB_LEN(internal_bb)); 1890 bbp[i] = cpu_to_le64(store_bb); 1891 } 1892 bb->changed = 0; 1893 if (read_seqretry(&bb->lock, seq)) 1894 goto retry; 1895 1896 bb->sector = (rdev->sb_start + 1897 (int)le32_to_cpu(sb->bblog_offset)); 1898 bb->size = le16_to_cpu(sb->bblog_size); 1899 } 1900 } 1901 1902 max_dev = 0; 1903 rdev_for_each(rdev2, mddev) 1904 if (rdev2->desc_nr+1 > max_dev) 1905 max_dev = rdev2->desc_nr+1; 1906 1907 if (max_dev > le32_to_cpu(sb->max_dev)) { 1908 int bmask; 1909 sb->max_dev = cpu_to_le32(max_dev); 1910 rdev->sb_size = max_dev * 2 + 256; 1911 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1912 if (rdev->sb_size & bmask) 1913 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1914 } else 1915 max_dev = le32_to_cpu(sb->max_dev); 1916 1917 for (i=0; i<max_dev;i++) 1918 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 1919 1920 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 1921 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 1922 1923 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 1924 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 1925 sb->feature_map |= 1926 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 1927 else 1928 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 1929 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 1930 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 1931 } 1932 1933 rdev_for_each(rdev2, mddev) { 1934 i = rdev2->desc_nr; 1935 if (test_bit(Faulty, &rdev2->flags)) 1936 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 1937 else if (test_bit(In_sync, &rdev2->flags)) 1938 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1939 else if (test_bit(Journal, &rdev2->flags)) 1940 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 1941 else if (rdev2->raid_disk >= 0) 1942 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1943 else 1944 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 1945 } 1946 1947 sb->sb_csum = calc_sb_1_csum(sb); 1948 } 1949 1950 static unsigned long long 1951 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1952 { 1953 struct mdp_superblock_1 *sb; 1954 sector_t max_sectors; 1955 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1956 return 0; /* component must fit device */ 1957 if (rdev->data_offset != rdev->new_data_offset) 1958 return 0; /* too confusing */ 1959 if (rdev->sb_start < rdev->data_offset) { 1960 /* minor versions 1 and 2; superblock before data */ 1961 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; 1962 max_sectors -= rdev->data_offset; 1963 if (!num_sectors || num_sectors > max_sectors) 1964 num_sectors = max_sectors; 1965 } else if (rdev->mddev->bitmap_info.offset) { 1966 /* minor version 0 with bitmap we can't move */ 1967 return 0; 1968 } else { 1969 /* minor version 0; superblock after data */ 1970 sector_t sb_start; 1971 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; 1972 sb_start &= ~(sector_t)(4*2 - 1); 1973 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 1974 if (!num_sectors || num_sectors > max_sectors) 1975 num_sectors = max_sectors; 1976 rdev->sb_start = sb_start; 1977 } 1978 sb = page_address(rdev->sb_page); 1979 sb->data_size = cpu_to_le64(num_sectors); 1980 sb->super_offset = cpu_to_le64(rdev->sb_start); 1981 sb->sb_csum = calc_sb_1_csum(sb); 1982 do { 1983 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1984 rdev->sb_page); 1985 } while (md_super_wait(rdev->mddev) < 0); 1986 return num_sectors; 1987 1988 } 1989 1990 static int 1991 
super_1_allow_new_offset(struct md_rdev *rdev, 1992 unsigned long long new_offset) 1993 { 1994 /* All necessary checks on new >= old have been done */ 1995 struct bitmap *bitmap; 1996 if (new_offset >= rdev->data_offset) 1997 return 1; 1998 1999 /* with 1.0 metadata, there is no metadata to tread on 2000 * so we can always move back */ 2001 if (rdev->mddev->minor_version == 0) 2002 return 1; 2003 2004 /* otherwise we must be sure not to step on 2005 * any metadata, so stay: 2006 * 36K beyond start of superblock 2007 * beyond end of badblocks 2008 * beyond write-intent bitmap 2009 */ 2010 if (rdev->sb_start + (32+4)*2 > new_offset) 2011 return 0; 2012 bitmap = rdev->mddev->bitmap; 2013 if (bitmap && !rdev->mddev->bitmap_info.file && 2014 rdev->sb_start + rdev->mddev->bitmap_info.offset + 2015 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) 2016 return 0; 2017 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2018 return 0; 2019 2020 return 1; 2021 } 2022 2023 static struct super_type super_types[] = { 2024 [0] = { 2025 .name = "0.90.0", 2026 .owner = THIS_MODULE, 2027 .load_super = super_90_load, 2028 .validate_super = super_90_validate, 2029 .sync_super = super_90_sync, 2030 .rdev_size_change = super_90_rdev_size_change, 2031 .allow_new_offset = super_90_allow_new_offset, 2032 }, 2033 [1] = { 2034 .name = "md-1", 2035 .owner = THIS_MODULE, 2036 .load_super = super_1_load, 2037 .validate_super = super_1_validate, 2038 .sync_super = super_1_sync, 2039 .rdev_size_change = super_1_rdev_size_change, 2040 .allow_new_offset = super_1_allow_new_offset, 2041 }, 2042 }; 2043 2044 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2045 { 2046 if (mddev->sync_super) { 2047 mddev->sync_super(mddev, rdev); 2048 return; 2049 } 2050 2051 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2052 2053 super_types[mddev->major_version].sync_super(mddev, rdev); 2054 } 2055 2056 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2057 { 2058 struct md_rdev *rdev, *rdev2; 2059 2060 rcu_read_lock(); 2061 rdev_for_each_rcu(rdev, mddev1) { 2062 if (test_bit(Faulty, &rdev->flags) || 2063 test_bit(Journal, &rdev->flags) || 2064 rdev->raid_disk == -1) 2065 continue; 2066 rdev_for_each_rcu(rdev2, mddev2) { 2067 if (test_bit(Faulty, &rdev2->flags) || 2068 test_bit(Journal, &rdev2->flags) || 2069 rdev2->raid_disk == -1) 2070 continue; 2071 if (rdev->bdev->bd_contains == 2072 rdev2->bdev->bd_contains) { 2073 rcu_read_unlock(); 2074 return 1; 2075 } 2076 } 2077 } 2078 rcu_read_unlock(); 2079 return 0; 2080 } 2081 2082 static LIST_HEAD(pending_raid_disks); 2083 2084 /* 2085 * Try to register data integrity profile for an mddev 2086 * 2087 * This is called when an array is started and after a disk has been kicked 2088 * from the array. It only succeeds if all working and active component devices 2089 * are integrity capable with matching profiles. 
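 *
 * Returns 0 on success (which includes the nothing-to-do cases of an
 * empty array or an already registered profile) and -EINVAL if the
 * component profiles clash or the integrity bio pool cannot be created.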
2090 */ 2091 int md_integrity_register(struct mddev *mddev) 2092 { 2093 struct md_rdev *rdev, *reference = NULL; 2094 2095 if (list_empty(&mddev->disks)) 2096 return 0; /* nothing to do */ 2097 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 2098 return 0; /* shouldn't register, or already is */ 2099 rdev_for_each(rdev, mddev) { 2100 /* skip spares and non-functional disks */ 2101 if (test_bit(Faulty, &rdev->flags)) 2102 continue; 2103 if (rdev->raid_disk < 0) 2104 continue; 2105 if (!reference) { 2106 /* Use the first rdev as the reference */ 2107 reference = rdev; 2108 continue; 2109 } 2110 /* does this rdev's profile match the reference profile? */ 2111 if (blk_integrity_compare(reference->bdev->bd_disk, 2112 rdev->bdev->bd_disk) < 0) 2113 return -EINVAL; 2114 } 2115 if (!reference || !bdev_get_integrity(reference->bdev)) 2116 return 0; 2117 /* 2118 * All component devices are integrity capable and have matching 2119 * profiles, register the common profile for the md device. 2120 */ 2121 blk_integrity_register(mddev->gendisk, 2122 bdev_get_integrity(reference->bdev)); 2123 2124 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2125 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { 2126 pr_err("md: failed to create integrity pool for %s\n", 2127 mdname(mddev)); 2128 return -EINVAL; 2129 } 2130 return 0; 2131 } 2132 EXPORT_SYMBOL(md_integrity_register); 2133 2134 /* 2135 * Attempt to add an rdev, but only if it is consistent with the current 2136 * integrity profile 2137 */ 2138 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2139 { 2140 struct blk_integrity *bi_mddev; 2141 char name[BDEVNAME_SIZE]; 2142 2143 if (!mddev->gendisk) 2144 return 0; 2145 2146 bi_mddev = blk_get_integrity(mddev->gendisk); 2147 2148 if (!bi_mddev) /* nothing to do */ 2149 return 0; 2150 2151 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2152 pr_err("%s: incompatible integrity profile for %s\n", 2153 mdname(mddev), bdevname(rdev->bdev, name)); 2154 return -ENXIO; 2155 } 2156 2157 return 0; 2158 } 2159 EXPORT_SYMBOL(md_integrity_add_rdev); 2160 2161 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2162 { 2163 char b[BDEVNAME_SIZE]; 2164 struct kobject *ko; 2165 int err; 2166 2167 /* prevent duplicates */ 2168 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2169 return -EEXIST; 2170 2171 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) && 2172 mddev->pers) 2173 return -EROFS; 2174 2175 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2176 if (!test_bit(Journal, &rdev->flags) && 2177 rdev->sectors && 2178 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2179 if (mddev->pers) { 2180 /* Cannot change size, so fail 2181 * If mddev->level <= 0, then we don't care 2182 * about aligning sizes (e.g. linear) 2183 */ 2184 if (mddev->level > 0) 2185 return -ENOSPC; 2186 } else 2187 mddev->dev_sectors = rdev->sectors; 2188 } 2189 2190 /* Verify rdev->desc_nr is unique. 
2191 * If it is -1, assign a free number, else 2192 * check number is not in use 2193 */ 2194 rcu_read_lock(); 2195 if (rdev->desc_nr < 0) { 2196 int choice = 0; 2197 if (mddev->pers) 2198 choice = mddev->raid_disks; 2199 while (md_find_rdev_nr_rcu(mddev, choice)) 2200 choice++; 2201 rdev->desc_nr = choice; 2202 } else { 2203 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2204 rcu_read_unlock(); 2205 return -EBUSY; 2206 } 2207 } 2208 rcu_read_unlock(); 2209 if (!test_bit(Journal, &rdev->flags) && 2210 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2211 pr_warn("md: %s: array is limited to %d devices\n", 2212 mdname(mddev), mddev->max_disks); 2213 return -EBUSY; 2214 } 2215 bdevname(rdev->bdev,b); 2216 strreplace(b, '/', '!'); 2217 2218 rdev->mddev = mddev; 2219 pr_debug("md: bind<%s>\n", b); 2220 2221 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2222 goto fail; 2223 2224 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2225 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2226 /* failure here is OK */; 2227 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2228 2229 list_add_rcu(&rdev->same_set, &mddev->disks); 2230 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2231 2232 /* May as well allow recovery to be retried once */ 2233 mddev->recovery_disabled++; 2234 2235 return 0; 2236 2237 fail: 2238 pr_warn("md: failed to register dev-%s for %s\n", 2239 b, mdname(mddev)); 2240 return err; 2241 } 2242 2243 static void md_delayed_delete(struct work_struct *ws) 2244 { 2245 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2246 kobject_del(&rdev->kobj); 2247 kobject_put(&rdev->kobj); 2248 } 2249 2250 static void unbind_rdev_from_array(struct md_rdev *rdev) 2251 { 2252 char b[BDEVNAME_SIZE]; 2253 2254 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2255 list_del_rcu(&rdev->same_set); 2256 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2257 rdev->mddev = NULL; 2258 sysfs_remove_link(&rdev->kobj, "block"); 2259 sysfs_put(rdev->sysfs_state); 2260 rdev->sysfs_state = NULL; 2261 rdev->badblocks.count = 0; 2262 /* We need to delay this, otherwise we can deadlock when 2263 * writing to 'remove' to "dev/state". We also need 2264 * to delay it due to rcu usage. 2265 */ 2266 synchronize_rcu(); 2267 INIT_WORK(&rdev->del_work, md_delayed_delete); 2268 kobject_get(&rdev->kobj); 2269 queue_work(md_misc_wq, &rdev->del_work); 2270 } 2271 2272 /* 2273 * prevent the device from being mounted, repartitioned or 2274 * otherwise reused by a RAID array (or any other kernel 2275 * subsystem), by bd_claiming the device. 2276 */ 2277 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2278 { 2279 int err = 0; 2280 struct block_device *bdev; 2281 char b[BDEVNAME_SIZE]; 2282 2283 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2284 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2285 if (IS_ERR(bdev)) { 2286 pr_warn("md: could not open %s.\n", __bdevname(dev, b)); 2287 return PTR_ERR(bdev); 2288 } 2289 rdev->bdev = bdev; 2290 return err; 2291 } 2292 2293 static void unlock_rdev(struct md_rdev *rdev) 2294 { 2295 struct block_device *bdev = rdev->bdev; 2296 rdev->bdev = NULL; 2297 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2298 } 2299 2300 void md_autodetect_dev(dev_t dev); 2301 2302 static void export_rdev(struct md_rdev *rdev) 2303 { 2304 char b[BDEVNAME_SIZE]; 2305 2306 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b)); 2307 md_rdev_clear(rdev); 2308 #ifndef MODULE 2309 if (test_bit(AutoDetected, &rdev->flags)) 2310 md_autodetect_dev(rdev->bdev->bd_dev); 2311 #endif 2312 unlock_rdev(rdev); 2313 kobject_put(&rdev->kobj); 2314 } 2315 2316 void md_kick_rdev_from_array(struct md_rdev *rdev) 2317 { 2318 unbind_rdev_from_array(rdev); 2319 export_rdev(rdev); 2320 } 2321 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2322 2323 static void export_array(struct mddev *mddev) 2324 { 2325 struct md_rdev *rdev; 2326 2327 while (!list_empty(&mddev->disks)) { 2328 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2329 same_set); 2330 md_kick_rdev_from_array(rdev); 2331 } 2332 mddev->raid_disks = 0; 2333 mddev->major_version = 0; 2334 } 2335 2336 static bool set_in_sync(struct mddev *mddev) 2337 { 2338 lockdep_assert_held(&mddev->lock); 2339 if (!mddev->in_sync) { 2340 mddev->sync_checkers++; 2341 spin_unlock(&mddev->lock); 2342 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2343 spin_lock(&mddev->lock); 2344 if (!mddev->in_sync && 2345 percpu_ref_is_zero(&mddev->writes_pending)) { 2346 mddev->in_sync = 1; 2347 /* 2348 * Ensure ->in_sync is visible before we clear 2349 * ->sync_checkers. 2350 */ 2351 smp_mb(); 2352 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2353 sysfs_notify_dirent_safe(mddev->sysfs_state); 2354 } 2355 if (--mddev->sync_checkers == 0) 2356 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2357 } 2358 if (mddev->safemode == 1) 2359 mddev->safemode = 0; 2360 return mddev->in_sync; 2361 } 2362 2363 static void sync_sbs(struct mddev *mddev, int nospares) 2364 { 2365 /* Update each superblock (in-memory image), but 2366 * if we are allowed to, skip spares which already 2367 * have the right event counter, or have one earlier 2368 * (which would mean they aren't being marked as dirty 2369 * with the rest of the array) 2370 */ 2371 struct md_rdev *rdev; 2372 rdev_for_each(rdev, mddev) { 2373 if (rdev->sb_events == mddev->events || 2374 (nospares && 2375 rdev->raid_disk < 0 && 2376 rdev->sb_events+1 == mddev->events)) { 2377 /* Don't update this superblock */ 2378 rdev->sb_loaded = 2; 2379 } else { 2380 sync_super(mddev, rdev); 2381 rdev->sb_loaded = 1; 2382 } 2383 } 2384 } 2385 2386 static bool does_sb_need_changing(struct mddev *mddev) 2387 { 2388 struct md_rdev *rdev; 2389 struct mdp_superblock_1 *sb; 2390 int role; 2391 2392 /* Find a good rdev */ 2393 rdev_for_each(rdev, mddev) 2394 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) 2395 break; 2396 2397 /* No good device found. */ 2398 if (!rdev) 2399 return false; 2400 2401 sb = page_address(rdev->sb_page); 2402 /* Check if a device has become faulty or a spare become active */ 2403 rdev_for_each(rdev, mddev) { 2404 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2405 /* Device activated? */ 2406 if (role == 0xffff && rdev->raid_disk >=0 && 2407 !test_bit(Faulty, &rdev->flags)) 2408 return true; 2409 /* Device turned faulty? 
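		 * (In the v1.x superblock, dev_roles[] values 0xfffd, 0xfffe and
		 * 0xffff are MD_DISK_ROLE_JOURNAL, MD_DISK_ROLE_FAULTY and
		 * MD_DISK_ROLE_SPARE; anything below 0xfffd is an active data
		 * slot, hence this comparison.)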
		 */
		if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
			return true;
	}

	/* Check if any mddev parameters have changed */
	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
	    (mddev->layout != le32_to_cpu(sb->layout)) ||
	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
		return true;

	return false;
}

void md_update_sb(struct mddev *mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;
	int ret = -1;

	if (mddev->ro) {
		if (force_change)
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		return;
	}

repeat:
	if (mddev_is_clustered(mddev)) {
		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
			force_change = 1;
		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
			nospares = 1;
		ret = md_cluster_ops->metadata_update_start(mddev);
		/* Has someone else updated the sb? */
		if (!does_sb_need_changing(mddev)) {
			if (ret == 0)
				md_cluster_ops->metadata_update_cancel(mddev);
			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
					 BIT(MD_SB_CHANGE_DEVS) |
					 BIT(MD_SB_CHANGE_CLEAN));
			return;
		}
	}

	/*
	 * First make sure individual recovery_offsets are correct.
	 * curr_resync_completed can only be used during recovery.
	 * During reshape/resync it might use array-addresses rather
	 * than device addresses.
	 */
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    !test_bit(Journal, &rdev->flags) &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;

	}
	if (!mddev->persistent) {
		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		if (!mddev->external) {
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			rdev_for_each(rdev, mddev) {
				if (rdev->badblocks.changed) {
					rdev->badblocks.changed = 0;
					ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock(&mddev->lock);

	mddev->utime = ktime_get_real_seconds();

	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
		force_change = 1;
	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
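		 * (For example, a disk that dropped out of a degraded array and
		 * is plugged back in later would look current and skip the
		 * resync, silently missing every write made while it was gone.)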
2515 * Pointless because if there are any spares to skip, 2516 * then a recovery will happen and soon that array won't 2517 * be degraded any more and the spare can go back to sleep then. 2518 */ 2519 nospares = 0; 2520 2521 sync_req = mddev->in_sync; 2522 2523 /* If this is just a dirty<->clean transition, and the array is clean 2524 * and 'events' is odd, we can roll back to the previous clean state */ 2525 if (nospares 2526 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2527 && mddev->can_decrease_events 2528 && mddev->events != 1) { 2529 mddev->events--; 2530 mddev->can_decrease_events = 0; 2531 } else { 2532 /* otherwise we have to go forward and ... */ 2533 mddev->events ++; 2534 mddev->can_decrease_events = nospares; 2535 } 2536 2537 /* 2538 * This 64-bit counter should never wrap. 2539 * Either we are in around ~1 trillion A.C., assuming 2540 * 1 reboot per second, or we have a bug... 2541 */ 2542 WARN_ON(mddev->events == 0); 2543 2544 rdev_for_each(rdev, mddev) { 2545 if (rdev->badblocks.changed) 2546 any_badblocks_changed++; 2547 if (test_bit(Faulty, &rdev->flags)) 2548 set_bit(FaultRecorded, &rdev->flags); 2549 } 2550 2551 sync_sbs(mddev, nospares); 2552 spin_unlock(&mddev->lock); 2553 2554 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2555 mdname(mddev), mddev->in_sync); 2556 2557 if (mddev->queue) 2558 blk_add_trace_msg(mddev->queue, "md md_update_sb"); 2559 rewrite: 2560 md_bitmap_update_sb(mddev->bitmap); 2561 rdev_for_each(rdev, mddev) { 2562 char b[BDEVNAME_SIZE]; 2563 2564 if (rdev->sb_loaded != 1) 2565 continue; /* no noise on spare devices */ 2566 2567 if (!test_bit(Faulty, &rdev->flags)) { 2568 md_super_write(mddev,rdev, 2569 rdev->sb_start, rdev->sb_size, 2570 rdev->sb_page); 2571 pr_debug("md: (write) %s's sb offset: %llu\n", 2572 bdevname(rdev->bdev, b), 2573 (unsigned long long)rdev->sb_start); 2574 rdev->sb_events = mddev->events; 2575 if (rdev->badblocks.size) { 2576 md_super_write(mddev, rdev, 2577 rdev->badblocks.sector, 2578 rdev->badblocks.size << 9, 2579 rdev->bb_page); 2580 rdev->badblocks.size = 0; 2581 } 2582 2583 } else 2584 pr_debug("md: %s (skipping faulty)\n", 2585 bdevname(rdev->bdev, b)); 2586 2587 if (mddev->level == LEVEL_MULTIPATH) 2588 /* only need to write one superblock... 
			 * every multipath rdev is effectively another path to
			 * the same physical device, so one copy reaches them all
			 */
			break;
	}
	if (md_super_wait(mddev) < 0)
		goto rewrite;
	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */

	if (mddev_is_clustered(mddev) && ret == 0)
		md_cluster_ops->metadata_update_finish(mddev);

	if (mddev->in_sync != sync_req ||
	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
		/* have to write it out again */
		goto repeat;
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");

	rdev_for_each(rdev, mddev) {
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
			ack_all_badblocks(&rdev->badblocks);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}
EXPORT_SYMBOL(md_update_sb);

static int add_bound_rdev(struct md_rdev *rdev)
{
	struct mddev *mddev = rdev->mddev;
	int err = 0;
	bool add_journal = test_bit(Journal, &rdev->flags);

	if (!mddev->pers->hot_remove_disk || add_journal) {
		/* If there is hot_add_disk but no hot_remove_disk
		 * then added disks are for geometry changes,
		 * and should be added immediately.
		 */
		super_types[mddev->major_version].
			validate_super(mddev, rdev);
		if (add_journal)
			mddev_suspend(mddev);
		err = mddev->pers->hot_add_disk(mddev, rdev);
		if (add_journal)
			mddev_resume(mddev);
		if (err) {
			md_kick_rdev_from_array(rdev);
			return err;
		}
	}
	sysfs_notify_dirent_safe(rdev->sysfs_state);

	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_new_event(mddev);
	md_wakeup_thread(mddev->thread);
	return 0;
}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.
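	 * For example, cmd_match("faulty\n", "faulty") and
	 * cmd_match("faulty", "faulty") both return 1 (match), while
	 * cmd_match("fault", "faulty") returns 0.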
They must either be the same, or cmd can 2660 * have a trailing newline 2661 */ 2662 while (*cmd && *str && *cmd == *str) { 2663 cmd++; 2664 str++; 2665 } 2666 if (*cmd == '\n') 2667 cmd++; 2668 if (*str || *cmd) 2669 return 0; 2670 return 1; 2671 } 2672 2673 struct rdev_sysfs_entry { 2674 struct attribute attr; 2675 ssize_t (*show)(struct md_rdev *, char *); 2676 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2677 }; 2678 2679 static ssize_t 2680 state_show(struct md_rdev *rdev, char *page) 2681 { 2682 char *sep = ","; 2683 size_t len = 0; 2684 unsigned long flags = READ_ONCE(rdev->flags); 2685 2686 if (test_bit(Faulty, &flags) || 2687 (!test_bit(ExternalBbl, &flags) && 2688 rdev->badblocks.unacked_exist)) 2689 len += sprintf(page+len, "faulty%s", sep); 2690 if (test_bit(In_sync, &flags)) 2691 len += sprintf(page+len, "in_sync%s", sep); 2692 if (test_bit(Journal, &flags)) 2693 len += sprintf(page+len, "journal%s", sep); 2694 if (test_bit(WriteMostly, &flags)) 2695 len += sprintf(page+len, "write_mostly%s", sep); 2696 if (test_bit(Blocked, &flags) || 2697 (rdev->badblocks.unacked_exist 2698 && !test_bit(Faulty, &flags))) 2699 len += sprintf(page+len, "blocked%s", sep); 2700 if (!test_bit(Faulty, &flags) && 2701 !test_bit(Journal, &flags) && 2702 !test_bit(In_sync, &flags)) 2703 len += sprintf(page+len, "spare%s", sep); 2704 if (test_bit(WriteErrorSeen, &flags)) 2705 len += sprintf(page+len, "write_error%s", sep); 2706 if (test_bit(WantReplacement, &flags)) 2707 len += sprintf(page+len, "want_replacement%s", sep); 2708 if (test_bit(Replacement, &flags)) 2709 len += sprintf(page+len, "replacement%s", sep); 2710 if (test_bit(ExternalBbl, &flags)) 2711 len += sprintf(page+len, "external_bbl%s", sep); 2712 if (test_bit(FailFast, &flags)) 2713 len += sprintf(page+len, "failfast%s", sep); 2714 2715 if (len) 2716 len -= strlen(sep); 2717 2718 return len+sprintf(page+len, "\n"); 2719 } 2720 2721 static ssize_t 2722 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2723 { 2724 /* can write 2725 * faulty - simulates an error 2726 * remove - disconnects the device 2727 * writemostly - sets write_mostly 2728 * -writemostly - clears write_mostly 2729 * blocked - sets the Blocked flags 2730 * -blocked - clears the Blocked and possibly simulates an error 2731 * insync - sets Insync providing device isn't active 2732 * -insync - clear Insync for a device with a slot assigned, 2733 * so that it gets rebuilt based on bitmap 2734 * write_error - sets WriteErrorSeen 2735 * -write_error - clears WriteErrorSeen 2736 * {,-}failfast - set/clear FailFast 2737 */ 2738 int err = -EINVAL; 2739 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2740 md_error(rdev->mddev, rdev); 2741 if (test_bit(Faulty, &rdev->flags)) 2742 err = 0; 2743 else 2744 err = -EBUSY; 2745 } else if (cmd_match(buf, "remove")) { 2746 if (rdev->mddev->pers) { 2747 clear_bit(Blocked, &rdev->flags); 2748 remove_and_add_spares(rdev->mddev, rdev); 2749 } 2750 if (rdev->raid_disk >= 0) 2751 err = -EBUSY; 2752 else { 2753 struct mddev *mddev = rdev->mddev; 2754 err = 0; 2755 if (mddev_is_clustered(mddev)) 2756 err = md_cluster_ops->remove_disk(mddev, rdev); 2757 2758 if (err == 0) { 2759 md_kick_rdev_from_array(rdev); 2760 if (mddev->pers) { 2761 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2762 md_wakeup_thread(mddev->thread); 2763 } 2764 md_new_event(mddev); 2765 } 2766 } 2767 } else if (cmd_match(buf, "writemostly")) { 2768 set_bit(WriteMostly, &rdev->flags); 2769 err = 0; 2770 } else if (cmd_match(buf, "-writemostly")) { 
2771 clear_bit(WriteMostly, &rdev->flags); 2772 err = 0; 2773 } else if (cmd_match(buf, "blocked")) { 2774 set_bit(Blocked, &rdev->flags); 2775 err = 0; 2776 } else if (cmd_match(buf, "-blocked")) { 2777 if (!test_bit(Faulty, &rdev->flags) && 2778 !test_bit(ExternalBbl, &rdev->flags) && 2779 rdev->badblocks.unacked_exist) { 2780 /* metadata handler doesn't understand badblocks, 2781 * so we need to fail the device 2782 */ 2783 md_error(rdev->mddev, rdev); 2784 } 2785 clear_bit(Blocked, &rdev->flags); 2786 clear_bit(BlockedBadBlocks, &rdev->flags); 2787 wake_up(&rdev->blocked_wait); 2788 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2789 md_wakeup_thread(rdev->mddev->thread); 2790 2791 err = 0; 2792 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2793 set_bit(In_sync, &rdev->flags); 2794 err = 0; 2795 } else if (cmd_match(buf, "failfast")) { 2796 set_bit(FailFast, &rdev->flags); 2797 err = 0; 2798 } else if (cmd_match(buf, "-failfast")) { 2799 clear_bit(FailFast, &rdev->flags); 2800 err = 0; 2801 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 2802 !test_bit(Journal, &rdev->flags)) { 2803 if (rdev->mddev->pers == NULL) { 2804 clear_bit(In_sync, &rdev->flags); 2805 rdev->saved_raid_disk = rdev->raid_disk; 2806 rdev->raid_disk = -1; 2807 err = 0; 2808 } 2809 } else if (cmd_match(buf, "write_error")) { 2810 set_bit(WriteErrorSeen, &rdev->flags); 2811 err = 0; 2812 } else if (cmd_match(buf, "-write_error")) { 2813 clear_bit(WriteErrorSeen, &rdev->flags); 2814 err = 0; 2815 } else if (cmd_match(buf, "want_replacement")) { 2816 /* Any non-spare device that is not a replacement can 2817 * become want_replacement at any time, but we then need to 2818 * check if recovery is needed. 2819 */ 2820 if (rdev->raid_disk >= 0 && 2821 !test_bit(Journal, &rdev->flags) && 2822 !test_bit(Replacement, &rdev->flags)) 2823 set_bit(WantReplacement, &rdev->flags); 2824 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2825 md_wakeup_thread(rdev->mddev->thread); 2826 err = 0; 2827 } else if (cmd_match(buf, "-want_replacement")) { 2828 /* Clearing 'want_replacement' is always allowed. 2829 * Once replacements starts it is too late though. 2830 */ 2831 err = 0; 2832 clear_bit(WantReplacement, &rdev->flags); 2833 } else if (cmd_match(buf, "replacement")) { 2834 /* Can only set a device as a replacement when array has not 2835 * yet been started. Once running, replacement is automatic 2836 * from spares, or by assigning 'slot'. 2837 */ 2838 if (rdev->mddev->pers) 2839 err = -EBUSY; 2840 else { 2841 set_bit(Replacement, &rdev->flags); 2842 err = 0; 2843 } 2844 } else if (cmd_match(buf, "-replacement")) { 2845 /* Similarly, can only clear Replacement before start */ 2846 if (rdev->mddev->pers) 2847 err = -EBUSY; 2848 else { 2849 clear_bit(Replacement, &rdev->flags); 2850 err = 0; 2851 } 2852 } else if (cmd_match(buf, "re-add")) { 2853 if (!rdev->mddev->pers) 2854 err = -EINVAL; 2855 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 2856 rdev->saved_raid_disk >= 0) { 2857 /* clear_bit is performed _after_ all the devices 2858 * have their local Faulty bit cleared. 
If any writes 2859 * happen in the meantime in the local node, they 2860 * will land in the local bitmap, which will be synced 2861 * by this node eventually 2862 */ 2863 if (!mddev_is_clustered(rdev->mddev) || 2864 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 2865 clear_bit(Faulty, &rdev->flags); 2866 err = add_bound_rdev(rdev); 2867 } 2868 } else 2869 err = -EBUSY; 2870 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 2871 set_bit(ExternalBbl, &rdev->flags); 2872 rdev->badblocks.shift = 0; 2873 err = 0; 2874 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 2875 clear_bit(ExternalBbl, &rdev->flags); 2876 err = 0; 2877 } 2878 if (!err) 2879 sysfs_notify_dirent_safe(rdev->sysfs_state); 2880 return err ? err : len; 2881 } 2882 static struct rdev_sysfs_entry rdev_state = 2883 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 2884 2885 static ssize_t 2886 errors_show(struct md_rdev *rdev, char *page) 2887 { 2888 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2889 } 2890 2891 static ssize_t 2892 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 2893 { 2894 unsigned int n; 2895 int rv; 2896 2897 rv = kstrtouint(buf, 10, &n); 2898 if (rv < 0) 2899 return rv; 2900 atomic_set(&rdev->corrected_errors, n); 2901 return len; 2902 } 2903 static struct rdev_sysfs_entry rdev_errors = 2904 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2905 2906 static ssize_t 2907 slot_show(struct md_rdev *rdev, char *page) 2908 { 2909 if (test_bit(Journal, &rdev->flags)) 2910 return sprintf(page, "journal\n"); 2911 else if (rdev->raid_disk < 0) 2912 return sprintf(page, "none\n"); 2913 else 2914 return sprintf(page, "%d\n", rdev->raid_disk); 2915 } 2916 2917 static ssize_t 2918 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 2919 { 2920 int slot; 2921 int err; 2922 2923 if (test_bit(Journal, &rdev->flags)) 2924 return -EBUSY; 2925 if (strncmp(buf, "none", 4)==0) 2926 slot = -1; 2927 else { 2928 err = kstrtouint(buf, 10, (unsigned int *)&slot); 2929 if (err < 0) 2930 return err; 2931 } 2932 if (rdev->mddev->pers && slot == -1) { 2933 /* Setting 'slot' on an active array requires also 2934 * updating the 'rd%d' link, and communicating 2935 * with the personality with ->hot_*_disk. 2936 * For now we only support removing 2937 * failed/spare devices. This normally happens automatically, 2938 * but not when the metadata is externally managed. 2939 */ 2940 if (rdev->raid_disk == -1) 2941 return -EEXIST; 2942 /* personality does all needed checks */ 2943 if (rdev->mddev->pers->hot_remove_disk == NULL) 2944 return -EINVAL; 2945 clear_bit(Blocked, &rdev->flags); 2946 remove_and_add_spares(rdev->mddev, rdev); 2947 if (rdev->raid_disk >= 0) 2948 return -EBUSY; 2949 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2950 md_wakeup_thread(rdev->mddev->thread); 2951 } else if (rdev->mddev->pers) { 2952 /* Activating a spare .. or possibly reactivating 2953 * if we ever get bitmaps working here. 
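		 *
		 * For example, writing "2" to the 'slot' file of an inactive
		 * spare asks the personality (via ->hot_add_disk below) to
		 * activate it in raid slot 2; writing "none" instead requests
		 * removal of a failed or spare device.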
2954 */ 2955 int err; 2956 2957 if (rdev->raid_disk != -1) 2958 return -EBUSY; 2959 2960 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 2961 return -EBUSY; 2962 2963 if (rdev->mddev->pers->hot_add_disk == NULL) 2964 return -EINVAL; 2965 2966 if (slot >= rdev->mddev->raid_disks && 2967 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2968 return -ENOSPC; 2969 2970 rdev->raid_disk = slot; 2971 if (test_bit(In_sync, &rdev->flags)) 2972 rdev->saved_raid_disk = slot; 2973 else 2974 rdev->saved_raid_disk = -1; 2975 clear_bit(In_sync, &rdev->flags); 2976 clear_bit(Bitmap_sync, &rdev->flags); 2977 err = rdev->mddev->pers-> 2978 hot_add_disk(rdev->mddev, rdev); 2979 if (err) { 2980 rdev->raid_disk = -1; 2981 return err; 2982 } else 2983 sysfs_notify_dirent_safe(rdev->sysfs_state); 2984 if (sysfs_link_rdev(rdev->mddev, rdev)) 2985 /* failure here is OK */; 2986 /* don't wakeup anyone, leave that to userspace. */ 2987 } else { 2988 if (slot >= rdev->mddev->raid_disks && 2989 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2990 return -ENOSPC; 2991 rdev->raid_disk = slot; 2992 /* assume it is working */ 2993 clear_bit(Faulty, &rdev->flags); 2994 clear_bit(WriteMostly, &rdev->flags); 2995 set_bit(In_sync, &rdev->flags); 2996 sysfs_notify_dirent_safe(rdev->sysfs_state); 2997 } 2998 return len; 2999 } 3000 3001 static struct rdev_sysfs_entry rdev_slot = 3002 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3003 3004 static ssize_t 3005 offset_show(struct md_rdev *rdev, char *page) 3006 { 3007 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3008 } 3009 3010 static ssize_t 3011 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3012 { 3013 unsigned long long offset; 3014 if (kstrtoull(buf, 10, &offset) < 0) 3015 return -EINVAL; 3016 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3017 return -EBUSY; 3018 if (rdev->sectors && rdev->mddev->external) 3019 /* Must set offset before size, so overlap checks 3020 * can be sane */ 3021 return -EBUSY; 3022 rdev->data_offset = offset; 3023 rdev->new_data_offset = offset; 3024 return len; 3025 } 3026 3027 static struct rdev_sysfs_entry rdev_offset = 3028 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3029 3030 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3031 { 3032 return sprintf(page, "%llu\n", 3033 (unsigned long long)rdev->new_data_offset); 3034 } 3035 3036 static ssize_t new_offset_store(struct md_rdev *rdev, 3037 const char *buf, size_t len) 3038 { 3039 unsigned long long new_offset; 3040 struct mddev *mddev = rdev->mddev; 3041 3042 if (kstrtoull(buf, 10, &new_offset) < 0) 3043 return -EINVAL; 3044 3045 if (mddev->sync_thread || 3046 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 3047 return -EBUSY; 3048 if (new_offset == rdev->data_offset) 3049 /* reset is always permitted */ 3050 ; 3051 else if (new_offset > rdev->data_offset) { 3052 /* must not push array size beyond rdev_sectors */ 3053 if (new_offset - rdev->data_offset 3054 + mddev->dev_sectors > rdev->sectors) 3055 return -E2BIG; 3056 } 3057 /* Metadata worries about other space details. */ 3058 3059 /* decreasing the offset is inconsistent with a backwards 3060 * reshape. 3061 */ 3062 if (new_offset < rdev->data_offset && 3063 mddev->reshape_backwards) 3064 return -EINVAL; 3065 /* Increasing offset is inconsistent with forwards 3066 * reshape. reshape_direction should be set to 3067 * 'backwards' first. 
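	 * (reshape_direction is an array-level sysfs attribute; writing
	 * "backwards" to it sets mddev->reshape_backwards.)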
	 */
	if (new_offset > rdev->data_offset &&
	    !mddev->reshape_backwards)
		return -EINVAL;

	if (mddev->pers && mddev->persistent &&
	    !super_types[mddev->major_version]
	    .allow_new_offset(rdev, new_offset))
		return -E2BIG;
	rdev->new_data_offset = new_offset;
	if (new_offset > rdev->data_offset)
		mddev->reshape_backwards = 1;
	else if (new_offset < rdev->data_offset)
		mddev->reshape_backwards = 0;

	return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);

static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (kstrtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}

static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	struct mddev *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (test_bit(Journal, &rdev->flags))
		return -EBUSY;
	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (rdev->data_offset != rdev->new_data_offset)
		return -EINVAL; /* too confusing */
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
				rdev->data_offset;
		if (!my_mddev->pers->resize)
			/* Cannot change size for RAID0 or Linear etc */
			return -EINVAL;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* Need to check that all other rdevs with the same
		 * ->bdev do not overlap.  'rcu' is sufficient to walk
		 * the rdev lists safely.
		 * This check does not provide a hard guarantee, it
		 * just helps avoid dangerous mistakes.
		 */
		struct mddev *mddev;
		int overlap = 0;
		struct list_head *tmp;

		rcu_read_lock();
		for_each_mddev(mddev, tmp) {
			struct md_rdev *rdev2;

			rdev_for_each(rdev2, mddev)
				if (rdev->bdev == rdev2->bdev &&
				    rdev != rdev2 &&
				    overlaps(rdev->data_offset, rdev->sectors,
					     rdev2->data_offset,
					     rdev2->sectors)) {
					overlap = 1;
					break;
				}
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		rcu_read_unlock();
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
3186 * We put oldsectors back because we *know* it is 3187 * safe, and trust userspace not to race with 3188 * itself 3189 */ 3190 rdev->sectors = oldsectors; 3191 return -EBUSY; 3192 } 3193 } 3194 return len; 3195 } 3196 3197 static struct rdev_sysfs_entry rdev_size = 3198 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 3199 3200 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 3201 { 3202 unsigned long long recovery_start = rdev->recovery_offset; 3203 3204 if (test_bit(In_sync, &rdev->flags) || 3205 recovery_start == MaxSector) 3206 return sprintf(page, "none\n"); 3207 3208 return sprintf(page, "%llu\n", recovery_start); 3209 } 3210 3211 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 3212 { 3213 unsigned long long recovery_start; 3214 3215 if (cmd_match(buf, "none")) 3216 recovery_start = MaxSector; 3217 else if (kstrtoull(buf, 10, &recovery_start)) 3218 return -EINVAL; 3219 3220 if (rdev->mddev->pers && 3221 rdev->raid_disk >= 0) 3222 return -EBUSY; 3223 3224 rdev->recovery_offset = recovery_start; 3225 if (recovery_start == MaxSector) 3226 set_bit(In_sync, &rdev->flags); 3227 else 3228 clear_bit(In_sync, &rdev->flags); 3229 return len; 3230 } 3231 3232 static struct rdev_sysfs_entry rdev_recovery_start = 3233 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 3234 3235 /* sysfs access to bad-blocks list. 3236 * We present two files. 3237 * 'bad-blocks' lists sector numbers and lengths of ranges that 3238 * are recorded as bad. The list is truncated to fit within 3239 * the one-page limit of sysfs. 3240 * Writing "sector length" to this file adds an acknowledged 3241 * bad block list. 3242 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet 3243 * been acknowledged. Writing to this file adds bad blocks 3244 * without acknowledging them. This is largely for testing. 
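 *
 * For example, writing "4096 128" to 'bad-blocks' records the 128-sector
 * range starting at sector 4096 as bad and acknowledged; the same write
 * to 'unacknowledged-bad-blocks' records it without the acknowledgement.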
 */
static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
	/* Maybe that ack was all we needed */
	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
		wake_up(&rdev->blocked_wait);
	return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);

static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);

static ssize_t
ppl_sector_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
}

static ssize_t
ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long sector;

	if (kstrtoull(buf, 10, &sector) < 0)
		return -EINVAL;
	if (sector != (sector_t)sector)
		return -EINVAL;

	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	if (rdev->mddev->persistent) {
		if (rdev->mddev->major_version == 0)
			return -EINVAL;
		if ((sector > rdev->sb_start &&
		     sector - rdev->sb_start > S16_MAX) ||
		    (sector < rdev->sb_start &&
		     rdev->sb_start - sector > -S16_MIN))
			return -EINVAL;
		rdev->ppl.offset = sector - rdev->sb_start;
	} else if (!rdev->mddev->external) {
		return -EBUSY;
	}
	rdev->ppl.sector = sector;
	return len;
}

static struct rdev_sysfs_entry rdev_ppl_sector =
__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);

static ssize_t
ppl_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%u\n", rdev->ppl.size);
}

static ssize_t
ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned int size;

	if (kstrtouint(buf, 10, &size) < 0)
		return -EINVAL;

	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	if (rdev->mddev->persistent) {
		if (rdev->mddev->major_version == 0)
			return -EINVAL;
		if (size > U16_MAX)
			return -EINVAL;
	} else if (!rdev->mddev->external) {
		return -EBUSY;
	}
	rdev->ppl.size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_ppl_size =
__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);

static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_new_offset.attr,
	&rdev_size.attr,
	&rdev_recovery_start.attr,
	&rdev_bad_blocks.attr,
	&rdev_unack_bad_blocks.attr,
	&rdev_ppl_sector.attr,
	&rdev_ppl_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct
rdev_sysfs_entry, attr); 3362 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3363 3364 if (!entry->show) 3365 return -EIO; 3366 if (!rdev->mddev) 3367 return -EBUSY; 3368 return entry->show(rdev, page); 3369 } 3370 3371 static ssize_t 3372 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3373 const char *page, size_t length) 3374 { 3375 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3376 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3377 ssize_t rv; 3378 struct mddev *mddev = rdev->mddev; 3379 3380 if (!entry->store) 3381 return -EIO; 3382 if (!capable(CAP_SYS_ADMIN)) 3383 return -EACCES; 3384 rv = mddev ? mddev_lock(mddev): -EBUSY; 3385 if (!rv) { 3386 if (rdev->mddev == NULL) 3387 rv = -EBUSY; 3388 else 3389 rv = entry->store(rdev, page, length); 3390 mddev_unlock(mddev); 3391 } 3392 return rv; 3393 } 3394 3395 static void rdev_free(struct kobject *ko) 3396 { 3397 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3398 kfree(rdev); 3399 } 3400 static const struct sysfs_ops rdev_sysfs_ops = { 3401 .show = rdev_attr_show, 3402 .store = rdev_attr_store, 3403 }; 3404 static struct kobj_type rdev_ktype = { 3405 .release = rdev_free, 3406 .sysfs_ops = &rdev_sysfs_ops, 3407 .default_attrs = rdev_default_attrs, 3408 }; 3409 3410 int md_rdev_init(struct md_rdev *rdev) 3411 { 3412 rdev->desc_nr = -1; 3413 rdev->saved_raid_disk = -1; 3414 rdev->raid_disk = -1; 3415 rdev->flags = 0; 3416 rdev->data_offset = 0; 3417 rdev->new_data_offset = 0; 3418 rdev->sb_events = 0; 3419 rdev->last_read_error = 0; 3420 rdev->sb_loaded = 0; 3421 rdev->bb_page = NULL; 3422 atomic_set(&rdev->nr_pending, 0); 3423 atomic_set(&rdev->read_errors, 0); 3424 atomic_set(&rdev->corrected_errors, 0); 3425 3426 INIT_LIST_HEAD(&rdev->same_set); 3427 init_waitqueue_head(&rdev->blocked_wait); 3428 3429 /* Add space to store bad block list. 3430 * This reserves the space even on arrays where it cannot 3431 * be used - I wonder if that matters 3432 */ 3433 return badblocks_init(&rdev->badblocks, 0); 3434 } 3435 EXPORT_SYMBOL_GPL(md_rdev_init); 3436 /* 3437 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3438 * 3439 * mark the device faulty if: 3440 * 3441 * - the device is nonexistent (zero size) 3442 * - the device has no valid superblock 3443 * 3444 * a faulty rdev _never_ has rdev->sb set. 3445 */ 3446 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3447 { 3448 char b[BDEVNAME_SIZE]; 3449 int err; 3450 struct md_rdev *rdev; 3451 sector_t size; 3452 3453 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3454 if (!rdev) 3455 return ERR_PTR(-ENOMEM); 3456 3457 err = md_rdev_init(rdev); 3458 if (err) 3459 goto abort_free; 3460 err = alloc_disk_sb(rdev); 3461 if (err) 3462 goto abort_free; 3463 3464 err = lock_rdev(rdev, newdev, super_format == -2); 3465 if (err) 3466 goto abort_free; 3467 3468 kobject_init(&rdev->kobj, &rdev_ktype); 3469 3470 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3471 if (!size) { 3472 pr_warn("md: %s has zero or unknown size, marking faulty!\n", 3473 bdevname(rdev->bdev,b)); 3474 err = -EINVAL; 3475 goto abort_free; 3476 } 3477 3478 if (super_format >= 0) { 3479 err = super_types[super_format]. 
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			pr_warn("md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	return rdev;

abort_free:
	if (rdev->bdev)
		unlock_rdev(rdev);
	md_rdev_clear(rdev);
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(struct mddev *mddev)
{
	int i;
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
				bdevname(rdev->bdev,b));
			md_kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each_safe(rdev, tmp, mddev) {
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
			pr_warn("md: %s: %s: only %d devices permitted\n",
				mdname(mddev), bdevname(rdev->bdev, b),
				mddev->max_disks);
			md_kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest) {
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				pr_warn("md: kicking non-fresh %s from array!\n",
					bdevname(rdev->bdev,b));
				md_kick_rdev_from_array(rdev);
				continue;
			}
		}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >=
			   (mddev->raid_disks - min(0, mddev->delta_disks)) &&
			   !test_bit(Journal, &rdev->flags)) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}
}

/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale', all without any
 * floating-point arithmetic.
 * For example, "1.35" with a scale of 3 yields 1350.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.'
&& decimals < 0)) { 3580 if (*cp == '.') 3581 decimals = 0; 3582 else if (decimals < scale) { 3583 unsigned int value; 3584 value = *cp - '0'; 3585 result = result * 10 + value; 3586 if (decimals >= 0) 3587 decimals++; 3588 } 3589 cp++; 3590 } 3591 if (*cp == '\n') 3592 cp++; 3593 if (*cp) 3594 return -EINVAL; 3595 if (decimals < 0) 3596 decimals = 0; 3597 while (decimals < scale) { 3598 result *= 10; 3599 decimals ++; 3600 } 3601 *res = result; 3602 return 0; 3603 } 3604 3605 static ssize_t 3606 safe_delay_show(struct mddev *mddev, char *page) 3607 { 3608 int msec = (mddev->safemode_delay*1000)/HZ; 3609 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3610 } 3611 static ssize_t 3612 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3613 { 3614 unsigned long msec; 3615 3616 if (mddev_is_clustered(mddev)) { 3617 pr_warn("md: Safemode is disabled for clustered mode\n"); 3618 return -EINVAL; 3619 } 3620 3621 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3622 return -EINVAL; 3623 if (msec == 0) 3624 mddev->safemode_delay = 0; 3625 else { 3626 unsigned long old_delay = mddev->safemode_delay; 3627 unsigned long new_delay = (msec*HZ)/1000; 3628 3629 if (new_delay == 0) 3630 new_delay = 1; 3631 mddev->safemode_delay = new_delay; 3632 if (new_delay < old_delay || old_delay == 0) 3633 mod_timer(&mddev->safemode_timer, jiffies+1); 3634 } 3635 return len; 3636 } 3637 static struct md_sysfs_entry md_safe_delay = 3638 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3639 3640 static ssize_t 3641 level_show(struct mddev *mddev, char *page) 3642 { 3643 struct md_personality *p; 3644 int ret; 3645 spin_lock(&mddev->lock); 3646 p = mddev->pers; 3647 if (p) 3648 ret = sprintf(page, "%s\n", p->name); 3649 else if (mddev->clevel[0]) 3650 ret = sprintf(page, "%s\n", mddev->clevel); 3651 else if (mddev->level != LEVEL_NONE) 3652 ret = sprintf(page, "%d\n", mddev->level); 3653 else 3654 ret = 0; 3655 spin_unlock(&mddev->lock); 3656 return ret; 3657 } 3658 3659 static ssize_t 3660 level_store(struct mddev *mddev, const char *buf, size_t len) 3661 { 3662 char clevel[16]; 3663 ssize_t rv; 3664 size_t slen = len; 3665 struct md_personality *pers, *oldpers; 3666 long level; 3667 void *priv, *oldpriv; 3668 struct md_rdev *rdev; 3669 3670 if (slen == 0 || slen >= sizeof(clevel)) 3671 return -EINVAL; 3672 3673 rv = mddev_lock(mddev); 3674 if (rv) 3675 return rv; 3676 3677 if (mddev->pers == NULL) { 3678 strncpy(mddev->clevel, buf, slen); 3679 if (mddev->clevel[slen-1] == '\n') 3680 slen--; 3681 mddev->clevel[slen] = 0; 3682 mddev->level = LEVEL_NONE; 3683 rv = len; 3684 goto out_unlock; 3685 } 3686 rv = -EROFS; 3687 if (mddev->ro) 3688 goto out_unlock; 3689 3690 /* request to change the personality. Need to ensure: 3691 * - array is not engaged in resync/recovery/reshape 3692 * - old personality can be suspended 3693 * - new personality will access other array. 
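	 *
	 * (A takeover such as raid5 -> raid6 is requested by writing the new
	 * level name to the array's 'level' sysfs file; pers->takeover()
	 * below decides whether the current geometry is acceptable.)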
3694 */ 3695 3696 rv = -EBUSY; 3697 if (mddev->sync_thread || 3698 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3699 mddev->reshape_position != MaxSector || 3700 mddev->sysfs_active) 3701 goto out_unlock; 3702 3703 rv = -EINVAL; 3704 if (!mddev->pers->quiesce) { 3705 pr_warn("md: %s: %s does not support online personality change\n", 3706 mdname(mddev), mddev->pers->name); 3707 goto out_unlock; 3708 } 3709 3710 /* Now find the new personality */ 3711 strncpy(clevel, buf, slen); 3712 if (clevel[slen-1] == '\n') 3713 slen--; 3714 clevel[slen] = 0; 3715 if (kstrtol(clevel, 10, &level)) 3716 level = LEVEL_NONE; 3717 3718 if (request_module("md-%s", clevel) != 0) 3719 request_module("md-level-%s", clevel); 3720 spin_lock(&pers_lock); 3721 pers = find_pers(level, clevel); 3722 if (!pers || !try_module_get(pers->owner)) { 3723 spin_unlock(&pers_lock); 3724 pr_warn("md: personality %s not loaded\n", clevel); 3725 rv = -EINVAL; 3726 goto out_unlock; 3727 } 3728 spin_unlock(&pers_lock); 3729 3730 if (pers == mddev->pers) { 3731 /* Nothing to do! */ 3732 module_put(pers->owner); 3733 rv = len; 3734 goto out_unlock; 3735 } 3736 if (!pers->takeover) { 3737 module_put(pers->owner); 3738 pr_warn("md: %s: %s does not support personality takeover\n", 3739 mdname(mddev), clevel); 3740 rv = -EINVAL; 3741 goto out_unlock; 3742 } 3743 3744 rdev_for_each(rdev, mddev) 3745 rdev->new_raid_disk = rdev->raid_disk; 3746 3747 /* ->takeover must set new_* and/or delta_disks 3748 * if it succeeds, and may set them when it fails. 3749 */ 3750 priv = pers->takeover(mddev); 3751 if (IS_ERR(priv)) { 3752 mddev->new_level = mddev->level; 3753 mddev->new_layout = mddev->layout; 3754 mddev->new_chunk_sectors = mddev->chunk_sectors; 3755 mddev->raid_disks -= mddev->delta_disks; 3756 mddev->delta_disks = 0; 3757 mddev->reshape_backwards = 0; 3758 module_put(pers->owner); 3759 pr_warn("md: %s: %s would not accept array\n", 3760 mdname(mddev), clevel); 3761 rv = PTR_ERR(priv); 3762 goto out_unlock; 3763 } 3764 3765 /* Looks like we have a winner */ 3766 mddev_suspend(mddev); 3767 mddev_detach(mddev); 3768 3769 spin_lock(&mddev->lock); 3770 oldpers = mddev->pers; 3771 oldpriv = mddev->private; 3772 mddev->pers = pers; 3773 mddev->private = priv; 3774 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3775 mddev->level = mddev->new_level; 3776 mddev->layout = mddev->new_layout; 3777 mddev->chunk_sectors = mddev->new_chunk_sectors; 3778 mddev->delta_disks = 0; 3779 mddev->reshape_backwards = 0; 3780 mddev->degraded = 0; 3781 spin_unlock(&mddev->lock); 3782 3783 if (oldpers->sync_request == NULL && 3784 mddev->external) { 3785 /* We are converting from a no-redundancy array 3786 * to a redundancy array and metadata is managed 3787 * externally so we need to be sure that writes 3788 * won't block due to a need to transition 3789 * clean->dirty 3790 * until external management is started. 
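		 * (That is why in_sync, safemode and safemode_delay are all
		 * cleared just below.)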
3791 */ 3792 mddev->in_sync = 0; 3793 mddev->safemode_delay = 0; 3794 mddev->safemode = 0; 3795 } 3796 3797 oldpers->free(mddev, oldpriv); 3798 3799 if (oldpers->sync_request == NULL && 3800 pers->sync_request != NULL) { 3801 /* need to add the md_redundancy_group */ 3802 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3803 pr_warn("md: cannot register extra attributes for %s\n", 3804 mdname(mddev)); 3805 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 3806 } 3807 if (oldpers->sync_request != NULL && 3808 pers->sync_request == NULL) { 3809 /* need to remove the md_redundancy_group */ 3810 if (mddev->to_remove == NULL) 3811 mddev->to_remove = &md_redundancy_group; 3812 } 3813 3814 module_put(oldpers->owner); 3815 3816 rdev_for_each(rdev, mddev) { 3817 if (rdev->raid_disk < 0) 3818 continue; 3819 if (rdev->new_raid_disk >= mddev->raid_disks) 3820 rdev->new_raid_disk = -1; 3821 if (rdev->new_raid_disk == rdev->raid_disk) 3822 continue; 3823 sysfs_unlink_rdev(mddev, rdev); 3824 } 3825 rdev_for_each(rdev, mddev) { 3826 if (rdev->raid_disk < 0) 3827 continue; 3828 if (rdev->new_raid_disk == rdev->raid_disk) 3829 continue; 3830 rdev->raid_disk = rdev->new_raid_disk; 3831 if (rdev->raid_disk < 0) 3832 clear_bit(In_sync, &rdev->flags); 3833 else { 3834 if (sysfs_link_rdev(mddev, rdev)) 3835 pr_warn("md: cannot register rd%d for %s after level change\n", 3836 rdev->raid_disk, mdname(mddev)); 3837 } 3838 } 3839 3840 if (pers->sync_request == NULL) { 3841 /* this is now an array without redundancy, so 3842 * it must always be in_sync 3843 */ 3844 mddev->in_sync = 1; 3845 del_timer_sync(&mddev->safemode_timer); 3846 } 3847 blk_set_stacking_limits(&mddev->queue->limits); 3848 pers->run(mddev); 3849 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 3850 mddev_resume(mddev); 3851 if (!mddev->thread) 3852 md_update_sb(mddev, 1); 3853 sysfs_notify(&mddev->kobj, NULL, "level"); 3854 md_new_event(mddev); 3855 rv = len; 3856 out_unlock: 3857 mddev_unlock(mddev); 3858 return rv; 3859 } 3860 3861 static struct md_sysfs_entry md_level = 3862 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3863 3864 static ssize_t 3865 layout_show(struct mddev *mddev, char *page) 3866 { 3867 /* just a number, not meaningful for all levels */ 3868 if (mddev->reshape_position != MaxSector && 3869 mddev->layout != mddev->new_layout) 3870 return sprintf(page, "%d (%d)\n", 3871 mddev->new_layout, mddev->layout); 3872 return sprintf(page, "%d\n", mddev->layout); 3873 } 3874 3875 static ssize_t 3876 layout_store(struct mddev *mddev, const char *buf, size_t len) 3877 { 3878 unsigned int n; 3879 int err; 3880 3881 err = kstrtouint(buf, 10, &n); 3882 if (err < 0) 3883 return err; 3884 err = mddev_lock(mddev); 3885 if (err) 3886 return err; 3887 3888 if (mddev->pers) { 3889 if (mddev->pers->check_reshape == NULL) 3890 err = -EBUSY; 3891 else if (mddev->ro) 3892 err = -EROFS; 3893 else { 3894 mddev->new_layout = n; 3895 err = mddev->pers->check_reshape(mddev); 3896 if (err) 3897 mddev->new_layout = mddev->layout; 3898 } 3899 } else { 3900 mddev->new_layout = n; 3901 if (mddev->reshape_position == MaxSector) 3902 mddev->layout = n; 3903 } 3904 mddev_unlock(mddev); 3905 return err ?: len; 3906 } 3907 static struct md_sysfs_entry md_layout = 3908 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3909 3910 static ssize_t 3911 raid_disks_show(struct mddev *mddev, char *page) 3912 { 3913 if (mddev->raid_disks == 0) 3914 return 0; 3915 if (mddev->reshape_position != MaxSector && 3916 
mddev->delta_disks != 0) 3917 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3918 mddev->raid_disks - mddev->delta_disks); 3919 return sprintf(page, "%d\n", mddev->raid_disks); 3920 } 3921 3922 static int update_raid_disks(struct mddev *mddev, int raid_disks); 3923 3924 static ssize_t 3925 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3926 { 3927 unsigned int n; 3928 int err; 3929 3930 err = kstrtouint(buf, 10, &n); 3931 if (err < 0) 3932 return err; 3933 3934 err = mddev_lock(mddev); 3935 if (err) 3936 return err; 3937 if (mddev->pers) 3938 err = update_raid_disks(mddev, n); 3939 else if (mddev->reshape_position != MaxSector) { 3940 struct md_rdev *rdev; 3941 int olddisks = mddev->raid_disks - mddev->delta_disks; 3942 3943 err = -EINVAL; 3944 rdev_for_each(rdev, mddev) { 3945 if (olddisks < n && 3946 rdev->data_offset < rdev->new_data_offset) 3947 goto out_unlock; 3948 if (olddisks > n && 3949 rdev->data_offset > rdev->new_data_offset) 3950 goto out_unlock; 3951 } 3952 err = 0; 3953 mddev->delta_disks = n - olddisks; 3954 mddev->raid_disks = n; 3955 mddev->reshape_backwards = (mddev->delta_disks < 0); 3956 } else 3957 mddev->raid_disks = n; 3958 out_unlock: 3959 mddev_unlock(mddev); 3960 return err ? err : len; 3961 } 3962 static struct md_sysfs_entry md_raid_disks = 3963 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3964 3965 static ssize_t 3966 chunk_size_show(struct mddev *mddev, char *page) 3967 { 3968 if (mddev->reshape_position != MaxSector && 3969 mddev->chunk_sectors != mddev->new_chunk_sectors) 3970 return sprintf(page, "%d (%d)\n", 3971 mddev->new_chunk_sectors << 9, 3972 mddev->chunk_sectors << 9); 3973 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3974 } 3975 3976 static ssize_t 3977 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 3978 { 3979 unsigned long n; 3980 int err; 3981 3982 err = kstrtoul(buf, 10, &n); 3983 if (err < 0) 3984 return err; 3985 3986 err = mddev_lock(mddev); 3987 if (err) 3988 return err; 3989 if (mddev->pers) { 3990 if (mddev->pers->check_reshape == NULL) 3991 err = -EBUSY; 3992 else if (mddev->ro) 3993 err = -EROFS; 3994 else { 3995 mddev->new_chunk_sectors = n >> 9; 3996 err = mddev->pers->check_reshape(mddev); 3997 if (err) 3998 mddev->new_chunk_sectors = mddev->chunk_sectors; 3999 } 4000 } else { 4001 mddev->new_chunk_sectors = n >> 9; 4002 if (mddev->reshape_position == MaxSector) 4003 mddev->chunk_sectors = n >> 9; 4004 } 4005 mddev_unlock(mddev); 4006 return err ?: len; 4007 } 4008 static struct md_sysfs_entry md_chunk_size = 4009 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 4010 4011 static ssize_t 4012 resync_start_show(struct mddev *mddev, char *page) 4013 { 4014 if (mddev->recovery_cp == MaxSector) 4015 return sprintf(page, "none\n"); 4016 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 4017 } 4018 4019 static ssize_t 4020 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 4021 { 4022 unsigned long long n; 4023 int err; 4024 4025 if (cmd_match(buf, "none")) 4026 n = MaxSector; 4027 else { 4028 err = kstrtoull(buf, 10, &n); 4029 if (err < 0) 4030 return err; 4031 if (n != (sector_t)n) 4032 return -EINVAL; 4033 } 4034 4035 err = mddev_lock(mddev); 4036 if (err) 4037 return err; 4038 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4039 err = -EBUSY; 4040 4041 if (!err) { 4042 mddev->recovery_cp = n; 4043 if (mddev->pers) 4044 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4045 
} 4046 mddev_unlock(mddev); 4047 return err ?: len; 4048 } 4049 static struct md_sysfs_entry md_resync_start = 4050 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 4051 resync_start_show, resync_start_store); 4052 4053 /* 4054 * The array state can be: 4055 * 4056 * clear 4057 * No devices, no size, no level 4058 * Equivalent to STOP_ARRAY ioctl 4059 * inactive 4060 * May have some settings, but array is not active 4061 * all IO results in error 4062 * When written, doesn't tear down array, but just stops it 4063 * suspended (not supported yet) 4064 * All IO requests will block. The array can be reconfigured. 4065 * Writing this, if accepted, will block until array is quiescent 4066 * readonly 4067 * no resync can happen. no superblocks get written. 4068 * write requests fail 4069 * read-auto 4070 * like readonly, but behaves like 'clean' on a write request. 4071 * 4072 * clean - no pending writes, but otherwise active. 4073 * When written to inactive array, starts without resync 4074 * If a write request arrives then 4075 * if metadata is known, mark 'dirty' and switch to 'active'. 4076 * if not known, block and switch to write-pending 4077 * If written to an active array that has pending writes, then fails. 4078 * active 4079 * fully active: IO and resync can be happening. 4080 * When written to inactive array, starts with resync 4081 * 4082 * write-pending 4083 * clean, but writes are blocked waiting for 'active' to be written. 4084 * 4085 * active-idle 4086 * like active, but no writes have been seen for a while (100msec). 4087 * 4088 */ 4089 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4090 write_pending, active_idle, bad_word}; 4091 static char *array_states[] = { 4092 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4093 "write-pending", "active-idle", NULL }; 4094 4095 static int match_word(const char *word, char **list) 4096 { 4097 int n; 4098 for (n=0; list[n]; n++) 4099 if (cmd_match(word, list[n])) 4100 break; 4101 return n; 4102 } 4103 4104 static ssize_t 4105 array_state_show(struct mddev *mddev, char *page) 4106 { 4107 enum array_state st = inactive; 4108 4109 if (mddev->pers) 4110 switch(mddev->ro) { 4111 case 1: 4112 st = readonly; 4113 break; 4114 case 2: 4115 st = read_auto; 4116 break; 4117 case 0: 4118 spin_lock(&mddev->lock); 4119 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4120 st = write_pending; 4121 else if (mddev->in_sync) 4122 st = clean; 4123 else if (mddev->safemode) 4124 st = active_idle; 4125 else 4126 st = active; 4127 spin_unlock(&mddev->lock); 4128 } 4129 else { 4130 if (list_empty(&mddev->disks) && 4131 mddev->raid_disks == 0 && 4132 mddev->dev_sectors == 0) 4133 st = clear; 4134 else 4135 st = inactive; 4136 } 4137 return sprintf(page, "%s\n", array_states[st]); 4138 } 4139 4140 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 4141 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 4142 static int do_md_run(struct mddev *mddev); 4143 static int restart_array(struct mddev *mddev); 4144 4145 static ssize_t 4146 array_state_store(struct mddev *mddev, const char *buf, size_t len) 4147 { 4148 int err = 0; 4149 enum array_state st = match_word(buf, array_states); 4150 4151 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 4152 /* don't take reconfig_mutex when toggling between 4153 * clean and active 4154 */ 4155 spin_lock(&mddev->lock); 4156 if (st == active) { 4157 restart_array(mddev); 4158 
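			/* Clearing MD_SB_CHANGE_PENDING and waking sb_wait
			 * below lets writes that were blocked behind a
			 * pending superblock update proceed again; this fast
			 * path intentionally runs under mddev->lock only,
			 * without the reconfig mutex (see comment above).
			 */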
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4159 md_wakeup_thread(mddev->thread); 4160 wake_up(&mddev->sb_wait); 4161 } else /* st == clean */ { 4162 restart_array(mddev); 4163 if (!set_in_sync(mddev)) 4164 err = -EBUSY; 4165 } 4166 if (!err) 4167 sysfs_notify_dirent_safe(mddev->sysfs_state); 4168 spin_unlock(&mddev->lock); 4169 return err ?: len; 4170 } 4171 err = mddev_lock(mddev); 4172 if (err) 4173 return err; 4174 err = -EINVAL; 4175 switch(st) { 4176 case bad_word: 4177 break; 4178 case clear: 4179 /* stopping an active array */ 4180 err = do_md_stop(mddev, 0, NULL); 4181 break; 4182 case inactive: 4183 /* stopping an active array */ 4184 if (mddev->pers) 4185 err = do_md_stop(mddev, 2, NULL); 4186 else 4187 err = 0; /* already inactive */ 4188 break; 4189 case suspended: 4190 break; /* not supported yet */ 4191 case readonly: 4192 if (mddev->pers) 4193 err = md_set_readonly(mddev, NULL); 4194 else { 4195 mddev->ro = 1; 4196 set_disk_ro(mddev->gendisk, 1); 4197 err = do_md_run(mddev); 4198 } 4199 break; 4200 case read_auto: 4201 if (mddev->pers) { 4202 if (mddev->ro == 0) 4203 err = md_set_readonly(mddev, NULL); 4204 else if (mddev->ro == 1) 4205 err = restart_array(mddev); 4206 if (err == 0) { 4207 mddev->ro = 2; 4208 set_disk_ro(mddev->gendisk, 0); 4209 } 4210 } else { 4211 mddev->ro = 2; 4212 err = do_md_run(mddev); 4213 } 4214 break; 4215 case clean: 4216 if (mddev->pers) { 4217 err = restart_array(mddev); 4218 if (err) 4219 break; 4220 spin_lock(&mddev->lock); 4221 if (!set_in_sync(mddev)) 4222 err = -EBUSY; 4223 spin_unlock(&mddev->lock); 4224 } else 4225 err = -EINVAL; 4226 break; 4227 case active: 4228 if (mddev->pers) { 4229 err = restart_array(mddev); 4230 if (err) 4231 break; 4232 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4233 wake_up(&mddev->sb_wait); 4234 err = 0; 4235 } else { 4236 mddev->ro = 0; 4237 set_disk_ro(mddev->gendisk, 0); 4238 err = do_md_run(mddev); 4239 } 4240 break; 4241 case write_pending: 4242 case active_idle: 4243 /* these cannot be set */ 4244 break; 4245 } 4246 4247 if (!err) { 4248 if (mddev->hold_active == UNTIL_IOCTL) 4249 mddev->hold_active = 0; 4250 sysfs_notify_dirent_safe(mddev->sysfs_state); 4251 } 4252 mddev_unlock(mddev); 4253 return err ?: len; 4254 } 4255 static struct md_sysfs_entry md_array_state = 4256 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 4257 4258 static ssize_t 4259 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 4260 return sprintf(page, "%d\n", 4261 atomic_read(&mddev->max_corr_read_errors)); 4262 } 4263 4264 static ssize_t 4265 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 4266 { 4267 unsigned int n; 4268 int rv; 4269 4270 rv = kstrtouint(buf, 10, &n); 4271 if (rv < 0) 4272 return rv; 4273 atomic_set(&mddev->max_corr_read_errors, n); 4274 return len; 4275 } 4276 4277 static struct md_sysfs_entry max_corr_read_errors = 4278 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 4279 max_corrected_read_errors_store); 4280 4281 static ssize_t 4282 null_show(struct mddev *mddev, char *page) 4283 { 4284 return -EINVAL; 4285 } 4286 4287 static ssize_t 4288 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 4289 { 4290 /* buf must be %d:%d\n? giving major and minor numbers */ 4291 /* The new device is added to the array. 4292 * If the array has a persistent superblock, we read the 4293 * superblock to initialise info and check validity. 
	 * Otherwise, the only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	struct md_rdev *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	flush_workqueue(md_misc_wq);

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev)) {
		mddev_unlock(mddev);
		return PTR_ERR(rdev);
	}
	err = bind_rdev_to_array(rdev, mddev);
out:
	if (err)
		export_rdev(rdev);
	mddev_unlock(mddev);
	if (!err)
		md_new_event(mddev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);

static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;
	int err;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = skip_spaces(end);
	}
	md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	mddev_unlock(mddev);
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);

static ssize_t
size_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		if (err == 0)
			md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	mddev_unlock(mddev);
	return err ? err : len;
}
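
/*
 * Example (illustrative): component_size is shown and stored in KiB
 * (i.e. dev_sectors / 2), so on an active array
 *
 *	echo 52428800 > /sys/block/md0/md/component_size
 *
 * requests an on-line resize of each component device to 50 GiB, while
 * on an inactive array the recorded size can only be reduced, or set
 * from 0.
 */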

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);

/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	int err;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EBUSY;
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		goto out_unlock;

	err = 0;
	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	major = simple_strtoul(buf, &e, 10);
	err = -EINVAL;
	if (e == buf || *e != '.')
		goto out_unlock;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		goto out_unlock;
	err = -ENOENT;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		goto out_unlock;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	err = 0;
out_unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	unsigned long recovery = mddev->recovery;
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
			type = "recover";
		else if (mddev->reshape_position != MaxSector)
			type = "reshape";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len) 4541 { 4542 if (!mddev->pers || !mddev->pers->sync_request) 4543 return -EINVAL; 4544 4545 4546 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4547 if (cmd_match(page, "frozen")) 4548 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4549 else 4550 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4551 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4552 mddev_lock(mddev) == 0) { 4553 flush_workqueue(md_misc_wq); 4554 if (mddev->sync_thread) { 4555 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4556 md_reap_sync_thread(mddev); 4557 } 4558 mddev_unlock(mddev); 4559 } 4560 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4561 return -EBUSY; 4562 else if (cmd_match(page, "resync")) 4563 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4564 else if (cmd_match(page, "recover")) { 4565 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4566 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4567 } else if (cmd_match(page, "reshape")) { 4568 int err; 4569 if (mddev->pers->start_reshape == NULL) 4570 return -EINVAL; 4571 err = mddev_lock(mddev); 4572 if (!err) { 4573 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4574 err = -EBUSY; 4575 else { 4576 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4577 err = mddev->pers->start_reshape(mddev); 4578 } 4579 mddev_unlock(mddev); 4580 } 4581 if (err) 4582 return err; 4583 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4584 } else { 4585 if (cmd_match(page, "check")) 4586 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4587 else if (!cmd_match(page, "repair")) 4588 return -EINVAL; 4589 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4590 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4591 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4592 } 4593 if (mddev->ro == 2) { 4594 /* A write to sync_action is enough to justify 4595 * canceling read-auto mode 4596 */ 4597 mddev->ro = 0; 4598 md_wakeup_thread(mddev->sync_thread); 4599 } 4600 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4601 md_wakeup_thread(mddev->thread); 4602 sysfs_notify_dirent_safe(mddev->sysfs_action); 4603 return len; 4604 } 4605 4606 static struct md_sysfs_entry md_scan_mode = 4607 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4608 4609 static ssize_t 4610 last_sync_action_show(struct mddev *mddev, char *page) 4611 { 4612 return sprintf(page, "%s\n", mddev->last_sync_action); 4613 } 4614 4615 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4616 4617 static ssize_t 4618 mismatch_cnt_show(struct mddev *mddev, char *page) 4619 { 4620 return sprintf(page, "%llu\n", 4621 (unsigned long long) 4622 atomic64_read(&mddev->resync_mismatches)); 4623 } 4624 4625 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4626 4627 static ssize_t 4628 sync_min_show(struct mddev *mddev, char *page) 4629 { 4630 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4631 mddev->sync_speed_min ? 
"local": "system"); 4632 } 4633 4634 static ssize_t 4635 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4636 { 4637 unsigned int min; 4638 int rv; 4639 4640 if (strncmp(buf, "system", 6)==0) { 4641 min = 0; 4642 } else { 4643 rv = kstrtouint(buf, 10, &min); 4644 if (rv < 0) 4645 return rv; 4646 if (min == 0) 4647 return -EINVAL; 4648 } 4649 mddev->sync_speed_min = min; 4650 return len; 4651 } 4652 4653 static struct md_sysfs_entry md_sync_min = 4654 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4655 4656 static ssize_t 4657 sync_max_show(struct mddev *mddev, char *page) 4658 { 4659 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4660 mddev->sync_speed_max ? "local": "system"); 4661 } 4662 4663 static ssize_t 4664 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4665 { 4666 unsigned int max; 4667 int rv; 4668 4669 if (strncmp(buf, "system", 6)==0) { 4670 max = 0; 4671 } else { 4672 rv = kstrtouint(buf, 10, &max); 4673 if (rv < 0) 4674 return rv; 4675 if (max == 0) 4676 return -EINVAL; 4677 } 4678 mddev->sync_speed_max = max; 4679 return len; 4680 } 4681 4682 static struct md_sysfs_entry md_sync_max = 4683 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4684 4685 static ssize_t 4686 degraded_show(struct mddev *mddev, char *page) 4687 { 4688 return sprintf(page, "%d\n", mddev->degraded); 4689 } 4690 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4691 4692 static ssize_t 4693 sync_force_parallel_show(struct mddev *mddev, char *page) 4694 { 4695 return sprintf(page, "%d\n", mddev->parallel_resync); 4696 } 4697 4698 static ssize_t 4699 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4700 { 4701 long n; 4702 4703 if (kstrtol(buf, 10, &n)) 4704 return -EINVAL; 4705 4706 if (n != 0 && n != 1) 4707 return -EINVAL; 4708 4709 mddev->parallel_resync = n; 4710 4711 if (mddev->sync_thread) 4712 wake_up(&resync_wait); 4713 4714 return len; 4715 } 4716 4717 /* force parallel resync, even with shared block devices */ 4718 static struct md_sysfs_entry md_sync_force_parallel = 4719 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4720 sync_force_parallel_show, sync_force_parallel_store); 4721 4722 static ssize_t 4723 sync_speed_show(struct mddev *mddev, char *page) 4724 { 4725 unsigned long resync, dt, db; 4726 if (mddev->curr_resync == 0) 4727 return sprintf(page, "none\n"); 4728 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4729 dt = (jiffies - mddev->resync_mark) / HZ; 4730 if (!dt) dt++; 4731 db = resync - mddev->resync_mark_cnt; 4732 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4733 } 4734 4735 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4736 4737 static ssize_t 4738 sync_completed_show(struct mddev *mddev, char *page) 4739 { 4740 unsigned long long max_sectors, resync; 4741 4742 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4743 return sprintf(page, "none\n"); 4744 4745 if (mddev->curr_resync == 1 || 4746 mddev->curr_resync == 2) 4747 return sprintf(page, "delayed\n"); 4748 4749 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4750 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4751 max_sectors = mddev->resync_max_sectors; 4752 else 4753 max_sectors = mddev->dev_sectors; 4754 4755 resync = mddev->curr_resync_completed; 4756 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4757 } 4758 4759 static struct md_sysfs_entry md_sync_completed = 4760 __ATTR_PREALLOC(sync_completed, S_IRUGO, 
sync_completed_show, NULL); 4761 4762 static ssize_t 4763 min_sync_show(struct mddev *mddev, char *page) 4764 { 4765 return sprintf(page, "%llu\n", 4766 (unsigned long long)mddev->resync_min); 4767 } 4768 static ssize_t 4769 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4770 { 4771 unsigned long long min; 4772 int err; 4773 4774 if (kstrtoull(buf, 10, &min)) 4775 return -EINVAL; 4776 4777 spin_lock(&mddev->lock); 4778 err = -EINVAL; 4779 if (min > mddev->resync_max) 4780 goto out_unlock; 4781 4782 err = -EBUSY; 4783 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4784 goto out_unlock; 4785 4786 /* Round down to multiple of 4K for safety */ 4787 mddev->resync_min = round_down(min, 8); 4788 err = 0; 4789 4790 out_unlock: 4791 spin_unlock(&mddev->lock); 4792 return err ?: len; 4793 } 4794 4795 static struct md_sysfs_entry md_min_sync = 4796 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4797 4798 static ssize_t 4799 max_sync_show(struct mddev *mddev, char *page) 4800 { 4801 if (mddev->resync_max == MaxSector) 4802 return sprintf(page, "max\n"); 4803 else 4804 return sprintf(page, "%llu\n", 4805 (unsigned long long)mddev->resync_max); 4806 } 4807 static ssize_t 4808 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4809 { 4810 int err; 4811 spin_lock(&mddev->lock); 4812 if (strncmp(buf, "max", 3) == 0) 4813 mddev->resync_max = MaxSector; 4814 else { 4815 unsigned long long max; 4816 int chunk; 4817 4818 err = -EINVAL; 4819 if (kstrtoull(buf, 10, &max)) 4820 goto out_unlock; 4821 if (max < mddev->resync_min) 4822 goto out_unlock; 4823 4824 err = -EBUSY; 4825 if (max < mddev->resync_max && 4826 mddev->ro == 0 && 4827 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4828 goto out_unlock; 4829 4830 /* Must be a multiple of chunk_size */ 4831 chunk = mddev->chunk_sectors; 4832 if (chunk) { 4833 sector_t temp = max; 4834 4835 err = -EINVAL; 4836 if (sector_div(temp, chunk)) 4837 goto out_unlock; 4838 } 4839 mddev->resync_max = max; 4840 } 4841 wake_up(&mddev->recovery_wait); 4842 err = 0; 4843 out_unlock: 4844 spin_unlock(&mddev->lock); 4845 return err ?: len; 4846 } 4847 4848 static struct md_sysfs_entry md_max_sync = 4849 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4850 4851 static ssize_t 4852 suspend_lo_show(struct mddev *mddev, char *page) 4853 { 4854 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4855 } 4856 4857 static ssize_t 4858 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4859 { 4860 unsigned long long new; 4861 int err; 4862 4863 err = kstrtoull(buf, 10, &new); 4864 if (err < 0) 4865 return err; 4866 if (new != (sector_t)new) 4867 return -EINVAL; 4868 4869 err = mddev_lock(mddev); 4870 if (err) 4871 return err; 4872 err = -EINVAL; 4873 if (mddev->pers == NULL || 4874 mddev->pers->quiesce == NULL) 4875 goto unlock; 4876 mddev_suspend(mddev); 4877 mddev->suspend_lo = new; 4878 mddev_resume(mddev); 4879 4880 err = 0; 4881 unlock: 4882 mddev_unlock(mddev); 4883 return err ?: len; 4884 } 4885 static struct md_sysfs_entry md_suspend_lo = 4886 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4887 4888 static ssize_t 4889 suspend_hi_show(struct mddev *mddev, char *page) 4890 { 4891 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4892 } 4893 4894 static ssize_t 4895 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 4896 { 4897 unsigned long long new; 4898 int err; 4899 4900 err = kstrtoull(buf, 10, &new); 4901 
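	/* As in suspend_lo_store() above, the value is parsed as a u64 and
	 * rejected just below if it does not fit in sector_t; the update
	 * itself is wrapped in mddev_suspend()/mddev_resume() so no IO is
	 * in flight while the suspended range changes.
	 */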
if (err < 0) 4902 return err; 4903 if (new != (sector_t)new) 4904 return -EINVAL; 4905 4906 err = mddev_lock(mddev); 4907 if (err) 4908 return err; 4909 err = -EINVAL; 4910 if (mddev->pers == NULL) 4911 goto unlock; 4912 4913 mddev_suspend(mddev); 4914 mddev->suspend_hi = new; 4915 mddev_resume(mddev); 4916 4917 err = 0; 4918 unlock: 4919 mddev_unlock(mddev); 4920 return err ?: len; 4921 } 4922 static struct md_sysfs_entry md_suspend_hi = 4923 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4924 4925 static ssize_t 4926 reshape_position_show(struct mddev *mddev, char *page) 4927 { 4928 if (mddev->reshape_position != MaxSector) 4929 return sprintf(page, "%llu\n", 4930 (unsigned long long)mddev->reshape_position); 4931 strcpy(page, "none\n"); 4932 return 5; 4933 } 4934 4935 static ssize_t 4936 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 4937 { 4938 struct md_rdev *rdev; 4939 unsigned long long new; 4940 int err; 4941 4942 err = kstrtoull(buf, 10, &new); 4943 if (err < 0) 4944 return err; 4945 if (new != (sector_t)new) 4946 return -EINVAL; 4947 err = mddev_lock(mddev); 4948 if (err) 4949 return err; 4950 err = -EBUSY; 4951 if (mddev->pers) 4952 goto unlock; 4953 mddev->reshape_position = new; 4954 mddev->delta_disks = 0; 4955 mddev->reshape_backwards = 0; 4956 mddev->new_level = mddev->level; 4957 mddev->new_layout = mddev->layout; 4958 mddev->new_chunk_sectors = mddev->chunk_sectors; 4959 rdev_for_each(rdev, mddev) 4960 rdev->new_data_offset = rdev->data_offset; 4961 err = 0; 4962 unlock: 4963 mddev_unlock(mddev); 4964 return err ?: len; 4965 } 4966 4967 static struct md_sysfs_entry md_reshape_position = 4968 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4969 reshape_position_store); 4970 4971 static ssize_t 4972 reshape_direction_show(struct mddev *mddev, char *page) 4973 { 4974 return sprintf(page, "%s\n", 4975 mddev->reshape_backwards ? 
"backwards" : "forwards"); 4976 } 4977 4978 static ssize_t 4979 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 4980 { 4981 int backwards = 0; 4982 int err; 4983 4984 if (cmd_match(buf, "forwards")) 4985 backwards = 0; 4986 else if (cmd_match(buf, "backwards")) 4987 backwards = 1; 4988 else 4989 return -EINVAL; 4990 if (mddev->reshape_backwards == backwards) 4991 return len; 4992 4993 err = mddev_lock(mddev); 4994 if (err) 4995 return err; 4996 /* check if we are allowed to change */ 4997 if (mddev->delta_disks) 4998 err = -EBUSY; 4999 else if (mddev->persistent && 5000 mddev->major_version == 0) 5001 err = -EINVAL; 5002 else 5003 mddev->reshape_backwards = backwards; 5004 mddev_unlock(mddev); 5005 return err ?: len; 5006 } 5007 5008 static struct md_sysfs_entry md_reshape_direction = 5009 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 5010 reshape_direction_store); 5011 5012 static ssize_t 5013 array_size_show(struct mddev *mddev, char *page) 5014 { 5015 if (mddev->external_size) 5016 return sprintf(page, "%llu\n", 5017 (unsigned long long)mddev->array_sectors/2); 5018 else 5019 return sprintf(page, "default\n"); 5020 } 5021 5022 static ssize_t 5023 array_size_store(struct mddev *mddev, const char *buf, size_t len) 5024 { 5025 sector_t sectors; 5026 int err; 5027 5028 err = mddev_lock(mddev); 5029 if (err) 5030 return err; 5031 5032 /* cluster raid doesn't support change array_sectors */ 5033 if (mddev_is_clustered(mddev)) { 5034 mddev_unlock(mddev); 5035 return -EINVAL; 5036 } 5037 5038 if (strncmp(buf, "default", 7) == 0) { 5039 if (mddev->pers) 5040 sectors = mddev->pers->size(mddev, 0, 0); 5041 else 5042 sectors = mddev->array_sectors; 5043 5044 mddev->external_size = 0; 5045 } else { 5046 if (strict_blocks_to_sectors(buf, §ors) < 0) 5047 err = -EINVAL; 5048 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 5049 err = -E2BIG; 5050 else 5051 mddev->external_size = 1; 5052 } 5053 5054 if (!err) { 5055 mddev->array_sectors = sectors; 5056 if (mddev->pers) { 5057 set_capacity(mddev->gendisk, mddev->array_sectors); 5058 revalidate_disk(mddev->gendisk); 5059 } 5060 } 5061 mddev_unlock(mddev); 5062 return err ?: len; 5063 } 5064 5065 static struct md_sysfs_entry md_array_size = 5066 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 5067 array_size_store); 5068 5069 static ssize_t 5070 consistency_policy_show(struct mddev *mddev, char *page) 5071 { 5072 int ret; 5073 5074 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 5075 ret = sprintf(page, "journal\n"); 5076 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { 5077 ret = sprintf(page, "ppl\n"); 5078 } else if (mddev->bitmap) { 5079 ret = sprintf(page, "bitmap\n"); 5080 } else if (mddev->pers) { 5081 if (mddev->pers->sync_request) 5082 ret = sprintf(page, "resync\n"); 5083 else 5084 ret = sprintf(page, "none\n"); 5085 } else { 5086 ret = sprintf(page, "unknown\n"); 5087 } 5088 5089 return ret; 5090 } 5091 5092 static ssize_t 5093 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) 5094 { 5095 int err = 0; 5096 5097 if (mddev->pers) { 5098 if (mddev->pers->change_consistency_policy) 5099 err = mddev->pers->change_consistency_policy(mddev, buf); 5100 else 5101 err = -EBUSY; 5102 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { 5103 set_bit(MD_HAS_PPL, &mddev->flags); 5104 } else { 5105 err = -EINVAL; 5106 } 5107 5108 return err ? 
err : len; 5109 } 5110 5111 static struct md_sysfs_entry md_consistency_policy = 5112 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show, 5113 consistency_policy_store); 5114 5115 static struct attribute *md_default_attrs[] = { 5116 &md_level.attr, 5117 &md_layout.attr, 5118 &md_raid_disks.attr, 5119 &md_chunk_size.attr, 5120 &md_size.attr, 5121 &md_resync_start.attr, 5122 &md_metadata.attr, 5123 &md_new_device.attr, 5124 &md_safe_delay.attr, 5125 &md_array_state.attr, 5126 &md_reshape_position.attr, 5127 &md_reshape_direction.attr, 5128 &md_array_size.attr, 5129 &max_corr_read_errors.attr, 5130 &md_consistency_policy.attr, 5131 NULL, 5132 }; 5133 5134 static struct attribute *md_redundancy_attrs[] = { 5135 &md_scan_mode.attr, 5136 &md_last_scan_mode.attr, 5137 &md_mismatches.attr, 5138 &md_sync_min.attr, 5139 &md_sync_max.attr, 5140 &md_sync_speed.attr, 5141 &md_sync_force_parallel.attr, 5142 &md_sync_completed.attr, 5143 &md_min_sync.attr, 5144 &md_max_sync.attr, 5145 &md_suspend_lo.attr, 5146 &md_suspend_hi.attr, 5147 &md_bitmap.attr, 5148 &md_degraded.attr, 5149 NULL, 5150 }; 5151 static struct attribute_group md_redundancy_group = { 5152 .name = NULL, 5153 .attrs = md_redundancy_attrs, 5154 }; 5155 5156 static ssize_t 5157 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 5158 { 5159 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5160 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5161 ssize_t rv; 5162 5163 if (!entry->show) 5164 return -EIO; 5165 spin_lock(&all_mddevs_lock); 5166 if (list_empty(&mddev->all_mddevs)) { 5167 spin_unlock(&all_mddevs_lock); 5168 return -EBUSY; 5169 } 5170 mddev_get(mddev); 5171 spin_unlock(&all_mddevs_lock); 5172 5173 rv = entry->show(mddev, page); 5174 mddev_put(mddev); 5175 return rv; 5176 } 5177 5178 static ssize_t 5179 md_attr_store(struct kobject *kobj, struct attribute *attr, 5180 const char *page, size_t length) 5181 { 5182 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5183 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5184 ssize_t rv; 5185 5186 if (!entry->store) 5187 return -EIO; 5188 if (!capable(CAP_SYS_ADMIN)) 5189 return -EACCES; 5190 spin_lock(&all_mddevs_lock); 5191 if (list_empty(&mddev->all_mddevs)) { 5192 spin_unlock(&all_mddevs_lock); 5193 return -EBUSY; 5194 } 5195 mddev_get(mddev); 5196 spin_unlock(&all_mddevs_lock); 5197 rv = entry->store(mddev, page, length); 5198 mddev_put(mddev); 5199 return rv; 5200 } 5201 5202 static void md_free(struct kobject *ko) 5203 { 5204 struct mddev *mddev = container_of(ko, struct mddev, kobj); 5205 5206 if (mddev->sysfs_state) 5207 sysfs_put(mddev->sysfs_state); 5208 5209 if (mddev->gendisk) 5210 del_gendisk(mddev->gendisk); 5211 if (mddev->queue) 5212 blk_cleanup_queue(mddev->queue); 5213 if (mddev->gendisk) 5214 put_disk(mddev->gendisk); 5215 percpu_ref_exit(&mddev->writes_pending); 5216 5217 bioset_exit(&mddev->bio_set); 5218 bioset_exit(&mddev->sync_set); 5219 kfree(mddev); 5220 } 5221 5222 static const struct sysfs_ops md_sysfs_ops = { 5223 .show = md_attr_show, 5224 .store = md_attr_store, 5225 }; 5226 static struct kobj_type md_ktype = { 5227 .release = md_free, 5228 .sysfs_ops = &md_sysfs_ops, 5229 .default_attrs = md_default_attrs, 5230 }; 5231 5232 int mdp_major = 0; 5233 5234 static void mddev_delayed_delete(struct work_struct *ws) 5235 { 5236 struct mddev *mddev = container_of(ws, struct mddev, del_work); 5237 5238 sysfs_remove_group(&mddev->kobj, 
			   &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

static void no_op(struct percpu_ref *r) {}

int mddev_init_writes_pending(struct mddev *mddev)
{
	if (mddev->writes_pending.percpu_count_ptr)
		return 0;
	if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
		return -ENOMEM;
	/* We want to start with the refcount at zero */
	percpu_ref_put(&mddev->writes_pending);
	return 0;
}
EXPORT_SYMBOL_GPL(mddev_init_writes_pending);

static int md_alloc(dev_t dev, char *name)
{
	/*
	 * If dev is zero, name is the name of a device to allocate with
	 * an arbitrary minor number.  It will be "md_???"
	 * If dev is non-zero it must be a device number with a MAJOR of
	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
	 * the device is being created by opening a node in /dev.
	 * If "name" is not NULL, the device is being created by
	 * writing to /sys/module/md_mod/parameters/new_array.
	 */
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name && !dev) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}
	if (name && dev)
		/*
		 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
		 */
		mddev->hold_active = UNTIL_STOP;

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	blk_queue_make_request(mddev->queue, md_make_request);
	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_write_cache(mddev->queue, true, true);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
5343 */ 5344 disk->flags |= GENHD_FL_EXT_DEVT; 5345 mddev->gendisk = disk; 5346 /* As soon as we call add_disk(), another thread could get 5347 * through to md_open, so make sure it doesn't get too far 5348 */ 5349 mutex_lock(&mddev->open_mutex); 5350 add_disk(disk); 5351 5352 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); 5353 if (error) { 5354 /* This isn't possible, but as kobject_init_and_add is marked 5355 * __must_check, we must do something with the result 5356 */ 5357 pr_debug("md: cannot register %s/md - name in use\n", 5358 disk->disk_name); 5359 error = 0; 5360 } 5361 if (mddev->kobj.sd && 5362 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 5363 pr_debug("pointless warning\n"); 5364 mutex_unlock(&mddev->open_mutex); 5365 abort: 5366 mutex_unlock(&disks_mutex); 5367 if (!error && mddev->kobj.sd) { 5368 kobject_uevent(&mddev->kobj, KOBJ_ADD); 5369 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 5370 } 5371 mddev_put(mddev); 5372 return error; 5373 } 5374 5375 static struct kobject *md_probe(dev_t dev, int *part, void *data) 5376 { 5377 if (create_on_open) 5378 md_alloc(dev, NULL); 5379 return NULL; 5380 } 5381 5382 static int add_named_array(const char *val, const struct kernel_param *kp) 5383 { 5384 /* 5385 * val must be "md_*" or "mdNNN". 5386 * For "md_*" we allocate an array with a large free minor number, and 5387 * set the name to val. val must not already be an active name. 5388 * For "mdNNN" we allocate an array with the minor number NNN 5389 * which must not already be in use. 5390 */ 5391 int len = strlen(val); 5392 char buf[DISK_NAME_LEN]; 5393 unsigned long devnum; 5394 5395 while (len && val[len-1] == '\n') 5396 len--; 5397 if (len >= DISK_NAME_LEN) 5398 return -E2BIG; 5399 strlcpy(buf, val, len+1); 5400 if (strncmp(buf, "md_", 3) == 0) 5401 return md_alloc(0, buf); 5402 if (strncmp(buf, "md", 2) == 0 && 5403 isdigit(buf[2]) && 5404 kstrtoul(buf+2, 10, &devnum) == 0 && 5405 devnum <= MINORMASK) 5406 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL); 5407 5408 return -EINVAL; 5409 } 5410 5411 static void md_safemode_timeout(struct timer_list *t) 5412 { 5413 struct mddev *mddev = from_timer(mddev, t, safemode_timer); 5414 5415 mddev->safemode = 1; 5416 if (mddev->external) 5417 sysfs_notify_dirent_safe(mddev->sysfs_state); 5418 5419 md_wakeup_thread(mddev->thread); 5420 } 5421 5422 static int start_dirty_degraded; 5423 5424 int md_run(struct mddev *mddev) 5425 { 5426 int err; 5427 struct md_rdev *rdev; 5428 struct md_personality *pers; 5429 5430 if (list_empty(&mddev->disks)) 5431 /* cannot run an array with no devices.. */ 5432 return -EINVAL; 5433 5434 if (mddev->pers) 5435 return -EBUSY; 5436 /* Cannot run until previous stop completes properly */ 5437 if (mddev->sysfs_active) 5438 return -EBUSY; 5439 5440 /* 5441 * Analyze all RAID superblock(s) 5442 */ 5443 if (!mddev->raid_disks) { 5444 if (!mddev->persistent) 5445 return -EINVAL; 5446 analyze_sbs(mddev); 5447 } 5448 5449 if (mddev->level != LEVEL_NONE) 5450 request_module("md-level-%d", mddev->level); 5451 else if (mddev->clevel[0]) 5452 request_module("md-%s", mddev->clevel); 5453 5454 /* 5455 * Drop all container device buffers, from now on 5456 * the only valid external interface is through the md 5457 * device. 
5458 */ 5459 mddev->has_superblocks = false; 5460 rdev_for_each(rdev, mddev) { 5461 if (test_bit(Faulty, &rdev->flags)) 5462 continue; 5463 sync_blockdev(rdev->bdev); 5464 invalidate_bdev(rdev->bdev); 5465 if (mddev->ro != 1 && 5466 (bdev_read_only(rdev->bdev) || 5467 bdev_read_only(rdev->meta_bdev))) { 5468 mddev->ro = 1; 5469 if (mddev->gendisk) 5470 set_disk_ro(mddev->gendisk, 1); 5471 } 5472 5473 if (rdev->sb_page) 5474 mddev->has_superblocks = true; 5475 5476 /* perform some consistency tests on the device. 5477 * We don't want the data to overlap the metadata, 5478 * Internal Bitmap issues have been handled elsewhere. 5479 */ 5480 if (rdev->meta_bdev) { 5481 /* Nothing to check */; 5482 } else if (rdev->data_offset < rdev->sb_start) { 5483 if (mddev->dev_sectors && 5484 rdev->data_offset + mddev->dev_sectors 5485 > rdev->sb_start) { 5486 pr_warn("md: %s: data overlaps metadata\n", 5487 mdname(mddev)); 5488 return -EINVAL; 5489 } 5490 } else { 5491 if (rdev->sb_start + rdev->sb_size/512 5492 > rdev->data_offset) { 5493 pr_warn("md: %s: metadata overlaps data\n", 5494 mdname(mddev)); 5495 return -EINVAL; 5496 } 5497 } 5498 sysfs_notify_dirent_safe(rdev->sysfs_state); 5499 } 5500 5501 if (!bioset_initialized(&mddev->bio_set)) { 5502 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5503 if (err) 5504 return err; 5505 } 5506 if (!bioset_initialized(&mddev->sync_set)) { 5507 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5508 if (err) 5509 return err; 5510 } 5511 5512 spin_lock(&pers_lock); 5513 pers = find_pers(mddev->level, mddev->clevel); 5514 if (!pers || !try_module_get(pers->owner)) { 5515 spin_unlock(&pers_lock); 5516 if (mddev->level != LEVEL_NONE) 5517 pr_warn("md: personality for level %d is not loaded!\n", 5518 mddev->level); 5519 else 5520 pr_warn("md: personality for level %s is not loaded!\n", 5521 mddev->clevel); 5522 err = -EINVAL; 5523 goto abort; 5524 } 5525 spin_unlock(&pers_lock); 5526 if (mddev->level != pers->level) { 5527 mddev->level = pers->level; 5528 mddev->new_level = pers->level; 5529 } 5530 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5531 5532 if (mddev->reshape_position != MaxSector && 5533 pers->start_reshape == NULL) { 5534 /* This personality cannot handle reshaping... */ 5535 module_put(pers->owner); 5536 err = -EINVAL; 5537 goto abort; 5538 } 5539 5540 if (pers->sync_request) { 5541 /* Warn if this is a potentially silly 5542 * configuration. 
5543 */ 5544 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5545 struct md_rdev *rdev2; 5546 int warned = 0; 5547 5548 rdev_for_each(rdev, mddev) 5549 rdev_for_each(rdev2, mddev) { 5550 if (rdev < rdev2 && 5551 rdev->bdev->bd_contains == 5552 rdev2->bdev->bd_contains) { 5553 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n", 5554 mdname(mddev), 5555 bdevname(rdev->bdev,b), 5556 bdevname(rdev2->bdev,b2)); 5557 warned = 1; 5558 } 5559 } 5560 5561 if (warned) 5562 pr_warn("True protection against single-disk failure might be compromised.\n"); 5563 } 5564 5565 mddev->recovery = 0; 5566 /* may be over-ridden by personality */ 5567 mddev->resync_max_sectors = mddev->dev_sectors; 5568 5569 mddev->ok_start_degraded = start_dirty_degraded; 5570 5571 if (start_readonly && mddev->ro == 0) 5572 mddev->ro = 2; /* read-only, but switch on first write */ 5573 5574 err = pers->run(mddev); 5575 if (err) 5576 pr_warn("md: pers->run() failed ...\n"); 5577 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 5578 WARN_ONCE(!mddev->external_size, 5579 "%s: default size too small, but 'external_size' not in effect?\n", 5580 __func__); 5581 pr_warn("md: invalid array_size %llu > default size %llu\n", 5582 (unsigned long long)mddev->array_sectors / 2, 5583 (unsigned long long)pers->size(mddev, 0, 0) / 2); 5584 err = -EINVAL; 5585 } 5586 if (err == 0 && pers->sync_request && 5587 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5588 struct bitmap *bitmap; 5589 5590 bitmap = md_bitmap_create(mddev, -1); 5591 if (IS_ERR(bitmap)) { 5592 err = PTR_ERR(bitmap); 5593 pr_warn("%s: failed to create bitmap (%d)\n", 5594 mdname(mddev), err); 5595 } else 5596 mddev->bitmap = bitmap; 5597 5598 } 5599 if (err) { 5600 mddev_detach(mddev); 5601 if (mddev->private) 5602 pers->free(mddev, mddev->private); 5603 mddev->private = NULL; 5604 module_put(pers->owner); 5605 md_bitmap_destroy(mddev); 5606 goto abort; 5607 } 5608 if (mddev->queue) { 5609 bool nonrot = true; 5610 5611 rdev_for_each(rdev, mddev) { 5612 if (rdev->raid_disk >= 0 && 5613 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { 5614 nonrot = false; 5615 break; 5616 } 5617 } 5618 if (mddev->degraded) 5619 nonrot = false; 5620 if (nonrot) 5621 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); 5622 else 5623 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); 5624 mddev->queue->backing_dev_info->congested_data = mddev; 5625 mddev->queue->backing_dev_info->congested_fn = md_congested; 5626 } 5627 if (pers->sync_request) { 5628 if (mddev->kobj.sd && 5629 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 5630 pr_warn("md: cannot register extra attributes for %s\n", 5631 mdname(mddev)); 5632 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 5633 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 5634 mddev->ro = 0; 5635 5636 atomic_set(&mddev->max_corr_read_errors, 5637 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 5638 mddev->safemode = 0; 5639 if (mddev_is_clustered(mddev)) 5640 mddev->safemode_delay = 0; 5641 else 5642 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 5643 mddev->in_sync = 1; 5644 smp_wmb(); 5645 spin_lock(&mddev->lock); 5646 mddev->pers = pers; 5647 spin_unlock(&mddev->lock); 5648 rdev_for_each(rdev, mddev) 5649 if (rdev->raid_disk >= 0) 5650 if (sysfs_link_rdev(mddev, rdev)) 5651 /* failure here is OK */; 5652 5653 if (mddev->degraded && !mddev->ro) 5654 /* This ensures that recovering status is reported immediately 5655 * via sysfs - until a lack of spares is 
confirmed. 5656 */ 5657 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5658 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5659 5660 if (mddev->sb_flags) 5661 md_update_sb(mddev, 0); 5662 5663 md_new_event(mddev); 5664 sysfs_notify_dirent_safe(mddev->sysfs_state); 5665 sysfs_notify_dirent_safe(mddev->sysfs_action); 5666 sysfs_notify(&mddev->kobj, NULL, "degraded"); 5667 return 0; 5668 5669 abort: 5670 bioset_exit(&mddev->bio_set); 5671 bioset_exit(&mddev->sync_set); 5672 return err; 5673 } 5674 EXPORT_SYMBOL_GPL(md_run); 5675 5676 static int do_md_run(struct mddev *mddev) 5677 { 5678 int err; 5679 5680 err = md_run(mddev); 5681 if (err) 5682 goto out; 5683 err = md_bitmap_load(mddev); 5684 if (err) { 5685 md_bitmap_destroy(mddev); 5686 goto out; 5687 } 5688 5689 if (mddev_is_clustered(mddev)) 5690 md_allow_write(mddev); 5691 5692 /* run start up tasks that require md_thread */ 5693 md_start(mddev); 5694 5695 md_wakeup_thread(mddev->thread); 5696 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 5697 5698 set_capacity(mddev->gendisk, mddev->array_sectors); 5699 revalidate_disk(mddev->gendisk); 5700 mddev->changed = 1; 5701 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5702 out: 5703 return err; 5704 } 5705 5706 int md_start(struct mddev *mddev) 5707 { 5708 int ret = 0; 5709 5710 if (mddev->pers->start) { 5711 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 5712 md_wakeup_thread(mddev->thread); 5713 ret = mddev->pers->start(mddev); 5714 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 5715 md_wakeup_thread(mddev->sync_thread); 5716 } 5717 return ret; 5718 } 5719 EXPORT_SYMBOL_GPL(md_start); 5720 5721 static int restart_array(struct mddev *mddev) 5722 { 5723 struct gendisk *disk = mddev->gendisk; 5724 struct md_rdev *rdev; 5725 bool has_journal = false; 5726 bool has_readonly = false; 5727 5728 /* Complain if it has no devices */ 5729 if (list_empty(&mddev->disks)) 5730 return -ENXIO; 5731 if (!mddev->pers) 5732 return -EINVAL; 5733 if (!mddev->ro) 5734 return -EBUSY; 5735 5736 rcu_read_lock(); 5737 rdev_for_each_rcu(rdev, mddev) { 5738 if (test_bit(Journal, &rdev->flags) && 5739 !test_bit(Faulty, &rdev->flags)) 5740 has_journal = true; 5741 if (bdev_read_only(rdev->bdev)) 5742 has_readonly = true; 5743 } 5744 rcu_read_unlock(); 5745 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) 5746 /* Don't restart rw with journal missing/faulty */ 5747 return -EINVAL; 5748 if (has_readonly) 5749 return -EROFS; 5750 5751 mddev->safemode = 0; 5752 mddev->ro = 0; 5753 set_disk_ro(disk, 0); 5754 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 5755 /* Kick recovery or resync if necessary */ 5756 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5757 md_wakeup_thread(mddev->thread); 5758 md_wakeup_thread(mddev->sync_thread); 5759 sysfs_notify_dirent_safe(mddev->sysfs_state); 5760 return 0; 5761 } 5762 5763 static void md_clean(struct mddev *mddev) 5764 { 5765 mddev->array_sectors = 0; 5766 mddev->external_size = 0; 5767 mddev->dev_sectors = 0; 5768 mddev->raid_disks = 0; 5769 mddev->recovery_cp = 0; 5770 mddev->resync_min = 0; 5771 mddev->resync_max = MaxSector; 5772 mddev->reshape_position = MaxSector; 5773 mddev->external = 0; 5774 mddev->persistent = 0; 5775 mddev->level = LEVEL_NONE; 5776 mddev->clevel[0] = 0; 5777 mddev->flags = 0; 5778 mddev->sb_flags = 0; 5779 mddev->ro = 0; 5780 mddev->metadata_type[0] = 0; 5781 mddev->chunk_sectors = 0; 5782 mddev->ctime = mddev->utime = 0; 5783 mddev->layout = 0; 5784 mddev->max_disks = 0; 5785 
	mddev->events = 0;
	mddev->can_decrease_events = 0;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = LEVEL_NONE;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 0;
	mddev->curr_resync = 0;
	atomic64_set(&mddev->resync_mismatches, 0);
	mddev->suspend_lo = mddev->suspend_hi = 0;
	mddev->sync_speed_min = mddev->sync_speed_max = 0;
	mddev->recovery = 0;
	mddev->in_sync = 0;
	mddev->changed = 0;
	mddev->degraded = 0;
	mddev->safemode = 0;
	mddev->private = NULL;
	mddev->cluster_info = NULL;
	mddev->bitmap_info.offset = 0;
	mddev->bitmap_info.default_offset = 0;
	mddev->bitmap_info.default_space = 0;
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
	mddev->bitmap_info.nodes = 0;
}

static void __md_stop_writes(struct mddev *mddev)
{
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	flush_workqueue(md_misc_wq);
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_reap_sync_thread(mddev);
	}

	del_timer_sync(&mddev->safemode_timer);

	if (mddev->pers && mddev->pers->quiesce) {
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	md_bitmap_flush(mddev);

	if (mddev->ro == 0 &&
	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
	     mddev->sb_flags)) {
		/* mark array as shutdown cleanly */
		if (!mddev_is_clustered(mddev))
			mddev->in_sync = 1;
		md_update_sb(mddev, 1);
	}
}

void md_stop_writes(struct mddev *mddev)
{
	mddev_lock_nointr(mddev);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);

static void mddev_detach(struct mddev *mddev)
{
	md_bitmap_wait_behind_writes(mddev);
	if (mddev->pers && mddev->pers->quiesce) {
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
}

static void __md_stop(struct mddev *mddev)
{
	struct md_personality *pers = mddev->pers;
	md_bitmap_destroy(mddev);
	mddev_detach(mddev);
	/* Ensure ->event_work is done */
	flush_workqueue(md_misc_wq);
	spin_lock(&mddev->lock);
	mddev->pers = NULL;
	spin_unlock(&mddev->lock);
	pers->free(mddev, mddev->private);
	mddev->private = NULL;
	if (pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
	module_put(pers->owner);
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
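
/*
 * __md_stop() above tears down the personality but leaves the biosets
 * untouched; md_stop() below additionally releases ->bio_set and
 * ->sync_set and is the entry point used by dm-raid.
 */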
5880 * This is called from dm-raid 5881 */ 5882 __md_stop(mddev); 5883 bioset_exit(&mddev->bio_set); 5884 bioset_exit(&mddev->sync_set); 5885 } 5886 5887 EXPORT_SYMBOL_GPL(md_stop); 5888 5889 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 5890 { 5891 int err = 0; 5892 int did_freeze = 0; 5893 5894 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5895 did_freeze = 1; 5896 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5897 md_wakeup_thread(mddev->thread); 5898 } 5899 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5900 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5901 if (mddev->sync_thread) 5902 /* Thread might be blocked waiting for metadata update 5903 * which will now never happen */ 5904 wake_up_process(mddev->sync_thread->tsk); 5905 5906 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 5907 return -EBUSY; 5908 mddev_unlock(mddev); 5909 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 5910 &mddev->recovery)); 5911 wait_event(mddev->sb_wait, 5912 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 5913 mddev_lock_nointr(mddev); 5914 5915 mutex_lock(&mddev->open_mutex); 5916 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5917 mddev->sync_thread || 5918 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 5919 pr_warn("md: %s still in use.\n",mdname(mddev)); 5920 if (did_freeze) { 5921 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5922 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5923 md_wakeup_thread(mddev->thread); 5924 } 5925 err = -EBUSY; 5926 goto out; 5927 } 5928 if (mddev->pers) { 5929 __md_stop_writes(mddev); 5930 5931 err = -ENXIO; 5932 if (mddev->ro==1) 5933 goto out; 5934 mddev->ro = 1; 5935 set_disk_ro(mddev->gendisk, 1); 5936 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5937 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5938 md_wakeup_thread(mddev->thread); 5939 sysfs_notify_dirent_safe(mddev->sysfs_state); 5940 err = 0; 5941 } 5942 out: 5943 mutex_unlock(&mddev->open_mutex); 5944 return err; 5945 } 5946 5947 /* mode: 5948 * 0 - completely stop and dis-assemble array 5949 * 2 - stop but do not disassemble array 5950 */ 5951 static int do_md_stop(struct mddev *mddev, int mode, 5952 struct block_device *bdev) 5953 { 5954 struct gendisk *disk = mddev->gendisk; 5955 struct md_rdev *rdev; 5956 int did_freeze = 0; 5957 5958 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 5959 did_freeze = 1; 5960 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5961 md_wakeup_thread(mddev->thread); 5962 } 5963 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5964 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5965 if (mddev->sync_thread) 5966 /* Thread might be blocked waiting for metadata update 5967 * which will now never happen */ 5968 wake_up_process(mddev->sync_thread->tsk); 5969 5970 mddev_unlock(mddev); 5971 wait_event(resync_wait, (mddev->sync_thread == NULL && 5972 !test_bit(MD_RECOVERY_RUNNING, 5973 &mddev->recovery))); 5974 mddev_lock_nointr(mddev); 5975 5976 mutex_lock(&mddev->open_mutex); 5977 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5978 mddev->sysfs_active || 5979 mddev->sync_thread || 5980 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 5981 pr_warn("md: %s still in use.\n",mdname(mddev)); 5982 mutex_unlock(&mddev->open_mutex); 5983 if (did_freeze) { 5984 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5985 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5986 md_wakeup_thread(mddev->thread); 5987 } 5988 return -EBUSY; 5989 } 5990 if (mddev->pers) { 5991 if 
(mddev->ro) 5992 set_disk_ro(disk, 0); 5993 5994 __md_stop_writes(mddev); 5995 __md_stop(mddev); 5996 mddev->queue->backing_dev_info->congested_fn = NULL; 5997 5998 /* tell userspace to handle 'inactive' */ 5999 sysfs_notify_dirent_safe(mddev->sysfs_state); 6000 6001 rdev_for_each(rdev, mddev) 6002 if (rdev->raid_disk >= 0) 6003 sysfs_unlink_rdev(mddev, rdev); 6004 6005 set_capacity(disk, 0); 6006 mutex_unlock(&mddev->open_mutex); 6007 mddev->changed = 1; 6008 revalidate_disk(disk); 6009 6010 if (mddev->ro) 6011 mddev->ro = 0; 6012 } else 6013 mutex_unlock(&mddev->open_mutex); 6014 /* 6015 * Free resources if final stop 6016 */ 6017 if (mode == 0) { 6018 pr_info("md: %s stopped.\n", mdname(mddev)); 6019 6020 if (mddev->bitmap_info.file) { 6021 struct file *f = mddev->bitmap_info.file; 6022 spin_lock(&mddev->lock); 6023 mddev->bitmap_info.file = NULL; 6024 spin_unlock(&mddev->lock); 6025 fput(f); 6026 } 6027 mddev->bitmap_info.offset = 0; 6028 6029 export_array(mddev); 6030 6031 md_clean(mddev); 6032 if (mddev->hold_active == UNTIL_STOP) 6033 mddev->hold_active = 0; 6034 } 6035 md_new_event(mddev); 6036 sysfs_notify_dirent_safe(mddev->sysfs_state); 6037 return 0; 6038 } 6039 6040 #ifndef MODULE 6041 static void autorun_array(struct mddev *mddev) 6042 { 6043 struct md_rdev *rdev; 6044 int err; 6045 6046 if (list_empty(&mddev->disks)) 6047 return; 6048 6049 pr_info("md: running: "); 6050 6051 rdev_for_each(rdev, mddev) { 6052 char b[BDEVNAME_SIZE]; 6053 pr_cont("<%s>", bdevname(rdev->bdev,b)); 6054 } 6055 pr_cont("\n"); 6056 6057 err = do_md_run(mddev); 6058 if (err) { 6059 pr_warn("md: do_md_run() returned %d\n", err); 6060 do_md_stop(mddev, 0, NULL); 6061 } 6062 } 6063 6064 /* 6065 * let's try to run arrays based on all disks that have arrived 6066 * until now. (those are in pending_raid_disks) 6067 * 6068 * the method: pick the first pending disk, collect all disks with 6069 * the same UUID, remove all from the pending list and put them into 6070 * the 'same_array' list. Then order this list based on superblock 6071 * update time (freshest comes first), kick out 'old' disks and 6072 * compare superblocks. If everything's fine then run it. 6073 * 6074 * If "unit" is allocated, then bump its reference count 6075 */ 6076 static void autorun_devices(int part) 6077 { 6078 struct md_rdev *rdev0, *rdev, *tmp; 6079 struct mddev *mddev; 6080 char b[BDEVNAME_SIZE]; 6081 6082 pr_info("md: autorun ...\n"); 6083 while (!list_empty(&pending_raid_disks)) { 6084 int unit; 6085 dev_t dev; 6086 LIST_HEAD(candidates); 6087 rdev0 = list_entry(pending_raid_disks.next, 6088 struct md_rdev, same_set); 6089 6090 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b)); 6091 INIT_LIST_HEAD(&candidates); 6092 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 6093 if (super_90_load(rdev, rdev0, 0) >= 0) { 6094 pr_debug("md: adding %s ...\n", 6095 bdevname(rdev->bdev,b)); 6096 list_move(&rdev->same_set, &candidates); 6097 } 6098 /* 6099 * now we have a set of devices, with all of them having 6100 * mostly sane superblocks. It's time to allocate the 6101 * mddev.
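 * (The unit number is taken from the preferred_minor recorded in the
 * 0.90 superblock of the first device, as the code below shows, so
 * autorun recreates the same /dev/mdX names.)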
6102 */ 6103 if (part) { 6104 dev = MKDEV(mdp_major, 6105 rdev0->preferred_minor << MdpMinorShift); 6106 unit = MINOR(dev) >> MdpMinorShift; 6107 } else { 6108 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 6109 unit = MINOR(dev); 6110 } 6111 if (rdev0->preferred_minor != unit) { 6112 pr_warn("md: unit number in %s is bad: %d\n", 6113 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 6114 break; 6115 } 6116 6117 md_probe(dev, NULL, NULL); 6118 mddev = mddev_find(dev); 6119 if (!mddev || !mddev->gendisk) { 6120 if (mddev) 6121 mddev_put(mddev); 6122 break; 6123 } 6124 if (mddev_lock(mddev)) 6125 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 6126 else if (mddev->raid_disks || mddev->major_version 6127 || !list_empty(&mddev->disks)) { 6128 pr_warn("md: %s already running, cannot run %s\n", 6129 mdname(mddev), bdevname(rdev0->bdev,b)); 6130 mddev_unlock(mddev); 6131 } else { 6132 pr_debug("md: created %s\n", mdname(mddev)); 6133 mddev->persistent = 1; 6134 rdev_for_each_list(rdev, tmp, &candidates) { 6135 list_del_init(&rdev->same_set); 6136 if (bind_rdev_to_array(rdev, mddev)) 6137 export_rdev(rdev); 6138 } 6139 autorun_array(mddev); 6140 mddev_unlock(mddev); 6141 } 6142 /* on success, candidates will be empty, on error 6143 * it won't... 6144 */ 6145 rdev_for_each_list(rdev, tmp, &candidates) { 6146 list_del_init(&rdev->same_set); 6147 export_rdev(rdev); 6148 } 6149 mddev_put(mddev); 6150 } 6151 pr_info("md: ... autorun DONE.\n"); 6152 } 6153 #endif /* !MODULE */ 6154 6155 static int get_version(void __user *arg) 6156 { 6157 mdu_version_t ver; 6158 6159 ver.major = MD_MAJOR_VERSION; 6160 ver.minor = MD_MINOR_VERSION; 6161 ver.patchlevel = MD_PATCHLEVEL_VERSION; 6162 6163 if (copy_to_user(arg, &ver, sizeof(ver))) 6164 return -EFAULT; 6165 6166 return 0; 6167 } 6168 6169 static int get_array_info(struct mddev *mddev, void __user *arg) 6170 { 6171 mdu_array_info_t info; 6172 int nr,working,insync,failed,spare; 6173 struct md_rdev *rdev; 6174 6175 nr = working = insync = failed = spare = 0; 6176 rcu_read_lock(); 6177 rdev_for_each_rcu(rdev, mddev) { 6178 nr++; 6179 if (test_bit(Faulty, &rdev->flags)) 6180 failed++; 6181 else { 6182 working++; 6183 if (test_bit(In_sync, &rdev->flags)) 6184 insync++; 6185 else if (test_bit(Journal, &rdev->flags)) 6186 /* TODO: add journal count to md_u.h */ 6187 ; 6188 else 6189 spare++; 6190 } 6191 } 6192 rcu_read_unlock(); 6193 6194 info.major_version = mddev->major_version; 6195 info.minor_version = mddev->minor_version; 6196 info.patch_version = MD_PATCHLEVEL_VERSION; 6197 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 6198 info.level = mddev->level; 6199 info.size = mddev->dev_sectors / 2; 6200 if (info.size != mddev->dev_sectors / 2) /* overflow */ 6201 info.size = -1; 6202 info.nr_disks = nr; 6203 info.raid_disks = mddev->raid_disks; 6204 info.md_minor = mddev->md_minor; 6205 info.not_persistent= !mddev->persistent; 6206 6207 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 6208 info.state = 0; 6209 if (mddev->in_sync) 6210 info.state = (1<<MD_SB_CLEAN); 6211 if (mddev->bitmap && mddev->bitmap_info.offset) 6212 info.state |= (1<<MD_SB_BITMAP_PRESENT); 6213 if (mddev_is_clustered(mddev)) 6214 info.state |= (1<<MD_SB_CLUSTERED); 6215 info.active_disks = insync; 6216 info.working_disks = working; 6217 info.failed_disks = failed; 6218 info.spare_disks = spare; 6219 6220 info.layout = mddev->layout; 6221 info.chunk_size = mddev->chunk_sectors << 9; 6222 6223 if (copy_to_user(arg, &info, sizeof(info))) 6224 return -EFAULT; 6225 6226 
return 0; 6227 } 6228 6229 static int get_bitmap_file(struct mddev *mddev, void __user * arg) 6230 { 6231 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 6232 char *ptr; 6233 int err; 6234 6235 file = kzalloc(sizeof(*file), GFP_NOIO); 6236 if (!file) 6237 return -ENOMEM; 6238 6239 err = 0; 6240 spin_lock(&mddev->lock); 6241 /* bitmap enabled */ 6242 if (mddev->bitmap_info.file) { 6243 ptr = file_path(mddev->bitmap_info.file, file->pathname, 6244 sizeof(file->pathname)); 6245 if (IS_ERR(ptr)) 6246 err = PTR_ERR(ptr); 6247 else 6248 memmove(file->pathname, ptr, 6249 sizeof(file->pathname)-(ptr-file->pathname)); 6250 } 6251 spin_unlock(&mddev->lock); 6252 6253 if (err == 0 && 6254 copy_to_user(arg, file, sizeof(*file))) 6255 err = -EFAULT; 6256 6257 kfree(file); 6258 return err; 6259 } 6260 6261 static int get_disk_info(struct mddev *mddev, void __user * arg) 6262 { 6263 mdu_disk_info_t info; 6264 struct md_rdev *rdev; 6265 6266 if (copy_from_user(&info, arg, sizeof(info))) 6267 return -EFAULT; 6268 6269 rcu_read_lock(); 6270 rdev = md_find_rdev_nr_rcu(mddev, info.number); 6271 if (rdev) { 6272 info.major = MAJOR(rdev->bdev->bd_dev); 6273 info.minor = MINOR(rdev->bdev->bd_dev); 6274 info.raid_disk = rdev->raid_disk; 6275 info.state = 0; 6276 if (test_bit(Faulty, &rdev->flags)) 6277 info.state |= (1<<MD_DISK_FAULTY); 6278 else if (test_bit(In_sync, &rdev->flags)) { 6279 info.state |= (1<<MD_DISK_ACTIVE); 6280 info.state |= (1<<MD_DISK_SYNC); 6281 } 6282 if (test_bit(Journal, &rdev->flags)) 6283 info.state |= (1<<MD_DISK_JOURNAL); 6284 if (test_bit(WriteMostly, &rdev->flags)) 6285 info.state |= (1<<MD_DISK_WRITEMOSTLY); 6286 if (test_bit(FailFast, &rdev->flags)) 6287 info.state |= (1<<MD_DISK_FAILFAST); 6288 } else { 6289 info.major = info.minor = 0; 6290 info.raid_disk = -1; 6291 info.state = (1<<MD_DISK_REMOVED); 6292 } 6293 rcu_read_unlock(); 6294 6295 if (copy_to_user(arg, &info, sizeof(info))) 6296 return -EFAULT; 6297 6298 return 0; 6299 } 6300 6301 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) 6302 { 6303 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 6304 struct md_rdev *rdev; 6305 dev_t dev = MKDEV(info->major,info->minor); 6306 6307 if (mddev_is_clustered(mddev) && 6308 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 6309 pr_warn("%s: Cannot add to clustered mddev.\n", 6310 mdname(mddev)); 6311 return -EINVAL; 6312 } 6313 6314 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 6315 return -EOVERFLOW; 6316 6317 if (!mddev->raid_disks) { 6318 int err; 6319 /* expecting a device which has a superblock */ 6320 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 6321 if (IS_ERR(rdev)) { 6322 pr_warn("md: md_import_device returned %ld\n", 6323 PTR_ERR(rdev)); 6324 return PTR_ERR(rdev); 6325 } 6326 if (!list_empty(&mddev->disks)) { 6327 struct md_rdev *rdev0 6328 = list_entry(mddev->disks.next, 6329 struct md_rdev, same_set); 6330 err = super_types[mddev->major_version] 6331 .load_super(rdev, rdev0, mddev->minor_version); 6332 if (err < 0) { 6333 pr_warn("md: %s has different UUID to %s\n", 6334 bdevname(rdev->bdev,b), 6335 bdevname(rdev0->bdev,b2)); 6336 export_rdev(rdev); 6337 return -EINVAL; 6338 } 6339 } 6340 err = bind_rdev_to_array(rdev, mddev); 6341 if (err) 6342 export_rdev(rdev); 6343 return err; 6344 } 6345 6346 /* 6347 * add_new_disk can be used once the array is assembled 6348 * to add "hot spares". 
They must already have a superblock 6349 * written 6350 */ 6351 if (mddev->pers) { 6352 int err; 6353 if (!mddev->pers->hot_add_disk) { 6354 pr_warn("%s: personality does not support diskops!\n", 6355 mdname(mddev)); 6356 return -EINVAL; 6357 } 6358 if (mddev->persistent) 6359 rdev = md_import_device(dev, mddev->major_version, 6360 mddev->minor_version); 6361 else 6362 rdev = md_import_device(dev, -1, -1); 6363 if (IS_ERR(rdev)) { 6364 pr_warn("md: md_import_device returned %ld\n", 6365 PTR_ERR(rdev)); 6366 return PTR_ERR(rdev); 6367 } 6368 /* set saved_raid_disk if appropriate */ 6369 if (!mddev->persistent) { 6370 if (info->state & (1<<MD_DISK_SYNC) && 6371 info->raid_disk < mddev->raid_disks) { 6372 rdev->raid_disk = info->raid_disk; 6373 set_bit(In_sync, &rdev->flags); 6374 clear_bit(Bitmap_sync, &rdev->flags); 6375 } else 6376 rdev->raid_disk = -1; 6377 rdev->saved_raid_disk = rdev->raid_disk; 6378 } else 6379 super_types[mddev->major_version]. 6380 validate_super(mddev, rdev); 6381 if ((info->state & (1<<MD_DISK_SYNC)) && 6382 rdev->raid_disk != info->raid_disk) { 6383 /* This was a hot-add request, but the events don't 6384 * match, so reject it. 6385 */ 6386 export_rdev(rdev); 6387 return -EINVAL; 6388 } 6389 6390 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 6391 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6392 set_bit(WriteMostly, &rdev->flags); 6393 else 6394 clear_bit(WriteMostly, &rdev->flags); 6395 if (info->state & (1<<MD_DISK_FAILFAST)) 6396 set_bit(FailFast, &rdev->flags); 6397 else 6398 clear_bit(FailFast, &rdev->flags); 6399 6400 if (info->state & (1<<MD_DISK_JOURNAL)) { 6401 struct md_rdev *rdev2; 6402 bool has_journal = false; 6403 6404 /* make sure there is no existing journal disk */ 6405 rdev_for_each(rdev2, mddev) { 6406 if (test_bit(Journal, &rdev2->flags)) { 6407 has_journal = true; 6408 break; 6409 } 6410 } 6411 if (has_journal || mddev->bitmap) { 6412 export_rdev(rdev); 6413 return -EBUSY; 6414 } 6415 set_bit(Journal, &rdev->flags); 6416 } 6417 /* 6418 * check whether the device shows up in other nodes 6419 */ 6420 if (mddev_is_clustered(mddev)) { 6421 if (info->state & (1 << MD_DISK_CANDIDATE)) 6422 set_bit(Candidate, &rdev->flags); 6423 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 6424 /* --add initiated by this node */ 6425 err = md_cluster_ops->add_new_disk(mddev, rdev); 6426 if (err) { 6427 export_rdev(rdev); 6428 return err; 6429 } 6430 } 6431 } 6432 6433 rdev->raid_disk = -1; 6434 err = bind_rdev_to_array(rdev, mddev); 6435 6436 if (err) 6437 export_rdev(rdev); 6438 6439 if (mddev_is_clustered(mddev)) { 6440 if (info->state & (1 << MD_DISK_CANDIDATE)) { 6441 if (!err) { 6442 err = md_cluster_ops->new_disk_ack(mddev, 6443 err == 0); 6444 if (err) 6445 md_kick_rdev_from_array(rdev); 6446 } 6447 } else { 6448 if (err) 6449 md_cluster_ops->add_new_disk_cancel(mddev); 6450 else 6451 err = add_bound_rdev(rdev); 6452 } 6453 6454 } else if (!err) 6455 err = add_bound_rdev(rdev); 6456 6457 return err; 6458 } 6459 6460 /* otherwise, add_new_disk is only allowed 6461 * for major_version==0 superblocks 6462 */ 6463 if (mddev->major_version != 0) { 6464 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 6465 return -EINVAL; 6466 } 6467 6468 if (!(info->state & (1<<MD_DISK_FAULTY))) { 6469 int err; 6470 rdev = md_import_device(dev, -1, 0); 6471 if (IS_ERR(rdev)) { 6472 pr_warn("md: error, md_import_device() returned %ld\n", 6473 PTR_ERR(rdev)); 6474 return PTR_ERR(rdev); 6475 } 6476 rdev->desc_nr = info->number; 6477 if (info->raid_disk <
mddev->raid_disks) 6478 rdev->raid_disk = info->raid_disk; 6479 else 6480 rdev->raid_disk = -1; 6481 6482 if (rdev->raid_disk < mddev->raid_disks) 6483 if (info->state & (1<<MD_DISK_SYNC)) 6484 set_bit(In_sync, &rdev->flags); 6485 6486 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6487 set_bit(WriteMostly, &rdev->flags); 6488 if (info->state & (1<<MD_DISK_FAILFAST)) 6489 set_bit(FailFast, &rdev->flags); 6490 6491 if (!mddev->persistent) { 6492 pr_debug("md: nonpersistent superblock ...\n"); 6493 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6494 } else 6495 rdev->sb_start = calc_dev_sboffset(rdev); 6496 rdev->sectors = rdev->sb_start; 6497 6498 err = bind_rdev_to_array(rdev, mddev); 6499 if (err) { 6500 export_rdev(rdev); 6501 return err; 6502 } 6503 } 6504 6505 return 0; 6506 } 6507 6508 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6509 { 6510 char b[BDEVNAME_SIZE]; 6511 struct md_rdev *rdev; 6512 6513 if (!mddev->pers) 6514 return -ENODEV; 6515 6516 rdev = find_rdev(mddev, dev); 6517 if (!rdev) 6518 return -ENXIO; 6519 6520 if (rdev->raid_disk < 0) 6521 goto kick_rdev; 6522 6523 clear_bit(Blocked, &rdev->flags); 6524 remove_and_add_spares(mddev, rdev); 6525 6526 if (rdev->raid_disk >= 0) 6527 goto busy; 6528 6529 kick_rdev: 6530 if (mddev_is_clustered(mddev)) 6531 md_cluster_ops->remove_disk(mddev, rdev); 6532 6533 md_kick_rdev_from_array(rdev); 6534 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6535 if (mddev->thread) 6536 md_wakeup_thread(mddev->thread); 6537 else 6538 md_update_sb(mddev, 1); 6539 md_new_event(mddev); 6540 6541 return 0; 6542 busy: 6543 pr_debug("md: cannot remove active disk %s from %s ...\n", 6544 bdevname(rdev->bdev,b), mdname(mddev)); 6545 return -EBUSY; 6546 } 6547 6548 static int hot_add_disk(struct mddev *mddev, dev_t dev) 6549 { 6550 char b[BDEVNAME_SIZE]; 6551 int err; 6552 struct md_rdev *rdev; 6553 6554 if (!mddev->pers) 6555 return -ENODEV; 6556 6557 if (mddev->major_version != 0) { 6558 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 6559 mdname(mddev)); 6560 return -EINVAL; 6561 } 6562 if (!mddev->pers->hot_add_disk) { 6563 pr_warn("%s: personality does not support diskops!\n", 6564 mdname(mddev)); 6565 return -EINVAL; 6566 } 6567 6568 rdev = md_import_device(dev, -1, 0); 6569 if (IS_ERR(rdev)) { 6570 pr_warn("md: error, md_import_device() returned %ld\n", 6571 PTR_ERR(rdev)); 6572 return -EINVAL; 6573 } 6574 6575 if (mddev->persistent) 6576 rdev->sb_start = calc_dev_sboffset(rdev); 6577 else 6578 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6579 6580 rdev->sectors = rdev->sb_start; 6581 6582 if (test_bit(Faulty, &rdev->flags)) { 6583 pr_warn("md: can not hot-add faulty %s disk to %s!\n", 6584 bdevname(rdev->bdev,b), mdname(mddev)); 6585 err = -EINVAL; 6586 goto abort_export; 6587 } 6588 6589 clear_bit(In_sync, &rdev->flags); 6590 rdev->desc_nr = -1; 6591 rdev->saved_raid_disk = -1; 6592 err = bind_rdev_to_array(rdev, mddev); 6593 if (err) 6594 goto abort_export; 6595 6596 /* 6597 * The rest had better be atomic; we can have disk failures 6598 * noticed in interrupt contexts ... 6599 */ 6600 6601 rdev->raid_disk = -1; 6602 6603 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6604 if (!mddev->thread) 6605 md_update_sb(mddev, 1); 6606 /* 6607 * Kick recovery, maybe this spare has to be added to the 6608 * array immediately.
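 * (The raid thread runs md_check_recovery(), which can activate the
 * spare via remove_and_add_spares() if the array is degraded.)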
6609 */ 6610 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6611 md_wakeup_thread(mddev->thread); 6612 md_new_event(mddev); 6613 return 0; 6614 6615 abort_export: 6616 export_rdev(rdev); 6617 return err; 6618 } 6619 6620 static int set_bitmap_file(struct mddev *mddev, int fd) 6621 { 6622 int err = 0; 6623 6624 if (mddev->pers) { 6625 if (!mddev->pers->quiesce || !mddev->thread) 6626 return -EBUSY; 6627 if (mddev->recovery || mddev->sync_thread) 6628 return -EBUSY; 6629 /* we should be able to change the bitmap.. */ 6630 } 6631 6632 if (fd >= 0) { 6633 struct inode *inode; 6634 struct file *f; 6635 6636 if (mddev->bitmap || mddev->bitmap_info.file) 6637 return -EEXIST; /* cannot add when bitmap is present */ 6638 f = fget(fd); 6639 6640 if (f == NULL) { 6641 pr_warn("%s: error: failed to get bitmap file\n", 6642 mdname(mddev)); 6643 return -EBADF; 6644 } 6645 6646 inode = f->f_mapping->host; 6647 if (!S_ISREG(inode->i_mode)) { 6648 pr_warn("%s: error: bitmap file must be a regular file\n", 6649 mdname(mddev)); 6650 err = -EBADF; 6651 } else if (!(f->f_mode & FMODE_WRITE)) { 6652 pr_warn("%s: error: bitmap file must be opened for write\n", 6653 mdname(mddev)); 6654 err = -EBADF; 6655 } else if (atomic_read(&inode->i_writecount) != 1) { 6656 pr_warn("%s: error: bitmap file is already in use\n", 6657 mdname(mddev)); 6658 err = -EBUSY; 6659 } 6660 if (err) { 6661 fput(f); 6662 return err; 6663 } 6664 mddev->bitmap_info.file = f; 6665 mddev->bitmap_info.offset = 0; /* file overrides offset */ 6666 } else if (mddev->bitmap == NULL) 6667 return -ENOENT; /* cannot remove what isn't there */ 6668 err = 0; 6669 if (mddev->pers) { 6670 if (fd >= 0) { 6671 struct bitmap *bitmap; 6672 6673 bitmap = md_bitmap_create(mddev, -1); 6674 mddev_suspend(mddev); 6675 if (!IS_ERR(bitmap)) { 6676 mddev->bitmap = bitmap; 6677 err = md_bitmap_load(mddev); 6678 } else 6679 err = PTR_ERR(bitmap); 6680 if (err) { 6681 md_bitmap_destroy(mddev); 6682 fd = -1; 6683 } 6684 mddev_resume(mddev); 6685 } else if (fd < 0) { 6686 mddev_suspend(mddev); 6687 md_bitmap_destroy(mddev); 6688 mddev_resume(mddev); 6689 } 6690 } 6691 if (fd < 0) { 6692 struct file *f = mddev->bitmap_info.file; 6693 if (f) { 6694 spin_lock(&mddev->lock); 6695 mddev->bitmap_info.file = NULL; 6696 spin_unlock(&mddev->lock); 6697 fput(f); 6698 } 6699 } 6700 6701 return err; 6702 } 6703 6704 /* 6705 * set_array_info is used in two different ways 6706 * The original usage is when creating a new array. 6707 * In this usage, raid_disks is > 0 and it, together with 6708 * level, size, not_persistent, layout and chunksize, determines the 6709 * shape of the array. 6710 * This will always create an array with a type-0.90.0 superblock. 6711 * The newer usage is when assembling an array. 6712 * In this case raid_disks will be 0, and the major_version field is 6713 * used to determine which style super-blocks are to be found on the devices. 6714 * The minor and patch _version numbers are also kept in case the 6715 * super_block handler wishes to interpret them. 6716 */ 6717 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) 6718 { 6719 6720 if (info->raid_disks == 0) { 6721 /* just setting version number for superblock loading */ 6722 if (info->major_version < 0 || 6723 info->major_version >= ARRAY_SIZE(super_types) || 6724 super_types[info->major_version].name == NULL) { 6725 /* maybe try to auto-load a module?
*/ 6726 pr_warn("md: superblock version %d not known\n", 6727 info->major_version); 6728 return -EINVAL; 6729 } 6730 mddev->major_version = info->major_version; 6731 mddev->minor_version = info->minor_version; 6732 mddev->patch_version = info->patch_version; 6733 mddev->persistent = !info->not_persistent; 6734 /* ensure mddev_put doesn't delete this now that there 6735 * is some minimal configuration. 6736 */ 6737 mddev->ctime = ktime_get_real_seconds(); 6738 return 0; 6739 } 6740 mddev->major_version = MD_MAJOR_VERSION; 6741 mddev->minor_version = MD_MINOR_VERSION; 6742 mddev->patch_version = MD_PATCHLEVEL_VERSION; 6743 mddev->ctime = ktime_get_real_seconds(); 6744 6745 mddev->level = info->level; 6746 mddev->clevel[0] = 0; 6747 mddev->dev_sectors = 2 * (sector_t)info->size; 6748 mddev->raid_disks = info->raid_disks; 6749 /* don't set md_minor, it is determined by which /dev/md* was 6750 * opened 6751 */ 6752 if (info->state & (1<<MD_SB_CLEAN)) 6753 mddev->recovery_cp = MaxSector; 6754 else 6755 mddev->recovery_cp = 0; 6756 mddev->persistent = ! info->not_persistent; 6757 mddev->external = 0; 6758 6759 mddev->layout = info->layout; 6760 mddev->chunk_sectors = info->chunk_size >> 9; 6761 6762 if (mddev->persistent) { 6763 mddev->max_disks = MD_SB_DISKS; 6764 mddev->flags = 0; 6765 mddev->sb_flags = 0; 6766 } 6767 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6768 6769 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 6770 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 6771 mddev->bitmap_info.offset = 0; 6772 6773 mddev->reshape_position = MaxSector; 6774 6775 /* 6776 * Generate a 128 bit UUID 6777 */ 6778 get_random_bytes(mddev->uuid, 16); 6779 6780 mddev->new_level = mddev->level; 6781 mddev->new_chunk_sectors = mddev->chunk_sectors; 6782 mddev->new_layout = mddev->layout; 6783 mddev->delta_disks = 0; 6784 mddev->reshape_backwards = 0; 6785 6786 return 0; 6787 } 6788 6789 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 6790 { 6791 lockdep_assert_held(&mddev->reconfig_mutex); 6792 6793 if (mddev->external_size) 6794 return; 6795 6796 mddev->array_sectors = array_sectors; 6797 } 6798 EXPORT_SYMBOL(md_set_array_sectors); 6799 6800 static int update_size(struct mddev *mddev, sector_t num_sectors) 6801 { 6802 struct md_rdev *rdev; 6803 int rv; 6804 int fit = (num_sectors == 0); 6805 sector_t old_dev_sectors = mddev->dev_sectors; 6806 6807 if (mddev->pers->resize == NULL) 6808 return -EINVAL; 6809 /* The "num_sectors" is the number of sectors of each device that 6810 * is used. This can only make sense for arrays with redundancy. 6811 * linear and raid0 always use whatever space is available. We can only 6812 * consider changing this number if no resync or reconstruction is 6813 * happening, and if the new size is acceptable. It must fit before the 6814 * sb_start or, if that is <data_offset, it must fit before the size 6815 * of each device. If num_sectors is zero, we find the largest size 6816 * that fits.
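 *
 * For example (illustrative numbers only): shrinking each member to
 * 500 GiB would be requested as num_sectors = 500 * 2^21 (512-byte
 * sectors), while num_sectors == 0 asks for the largest size that
 * every member can still provide.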
6817 */ 6818 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6819 mddev->sync_thread) 6820 return -EBUSY; 6821 if (mddev->ro) 6822 return -EROFS; 6823 6824 rdev_for_each(rdev, mddev) { 6825 sector_t avail = rdev->sectors; 6826 6827 if (fit && (num_sectors == 0 || num_sectors > avail)) 6828 num_sectors = avail; 6829 if (avail < num_sectors) 6830 return -ENOSPC; 6831 } 6832 rv = mddev->pers->resize(mddev, num_sectors); 6833 if (!rv) { 6834 if (mddev_is_clustered(mddev)) 6835 md_cluster_ops->update_size(mddev, old_dev_sectors); 6836 else if (mddev->queue) { 6837 set_capacity(mddev->gendisk, mddev->array_sectors); 6838 revalidate_disk(mddev->gendisk); 6839 } 6840 } 6841 return rv; 6842 } 6843 6844 static int update_raid_disks(struct mddev *mddev, int raid_disks) 6845 { 6846 int rv; 6847 struct md_rdev *rdev; 6848 /* change the number of raid disks */ 6849 if (mddev->pers->check_reshape == NULL) 6850 return -EINVAL; 6851 if (mddev->ro) 6852 return -EROFS; 6853 if (raid_disks <= 0 || 6854 (mddev->max_disks && raid_disks >= mddev->max_disks)) 6855 return -EINVAL; 6856 if (mddev->sync_thread || 6857 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 6858 mddev->reshape_position != MaxSector) 6859 return -EBUSY; 6860 6861 rdev_for_each(rdev, mddev) { 6862 if (mddev->raid_disks < raid_disks && 6863 rdev->data_offset < rdev->new_data_offset) 6864 return -EINVAL; 6865 if (mddev->raid_disks > raid_disks && 6866 rdev->data_offset > rdev->new_data_offset) 6867 return -EINVAL; 6868 } 6869 6870 mddev->delta_disks = raid_disks - mddev->raid_disks; 6871 if (mddev->delta_disks < 0) 6872 mddev->reshape_backwards = 1; 6873 else if (mddev->delta_disks > 0) 6874 mddev->reshape_backwards = 0; 6875 6876 rv = mddev->pers->check_reshape(mddev); 6877 if (rv < 0) { 6878 mddev->delta_disks = 0; 6879 mddev->reshape_backwards = 0; 6880 } 6881 return rv; 6882 } 6883 6884 /* 6885 * update_array_info is used to change the configuration of an 6886 * on-line array. 6887 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 6888 * fields in the info are checked against the array. 6889 * Any differences that cannot be handled will cause an error. 6890 * Normally, only one change can be managed at a time. 
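 *
 * For example, growing a raid5 from 3 to 4 disks is requested by
 * submitting the mdu_array_info_t returned by GET_ARRAY_INFO with only
 * raid_disks changed; altering size or layout in the same call would
 * be rejected with -EINVAL.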
6891 */ 6892 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 6893 { 6894 int rv = 0; 6895 int cnt = 0; 6896 int state = 0; 6897 6898 /* calculate expected state,ignoring low bits */ 6899 if (mddev->bitmap && mddev->bitmap_info.offset) 6900 state |= (1 << MD_SB_BITMAP_PRESENT); 6901 6902 if (mddev->major_version != info->major_version || 6903 mddev->minor_version != info->minor_version || 6904 /* mddev->patch_version != info->patch_version || */ 6905 mddev->ctime != info->ctime || 6906 mddev->level != info->level || 6907 /* mddev->layout != info->layout || */ 6908 mddev->persistent != !info->not_persistent || 6909 mddev->chunk_sectors != info->chunk_size >> 9 || 6910 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 6911 ((state^info->state) & 0xfffffe00) 6912 ) 6913 return -EINVAL; 6914 /* Check there is only one change */ 6915 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6916 cnt++; 6917 if (mddev->raid_disks != info->raid_disks) 6918 cnt++; 6919 if (mddev->layout != info->layout) 6920 cnt++; 6921 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 6922 cnt++; 6923 if (cnt == 0) 6924 return 0; 6925 if (cnt > 1) 6926 return -EINVAL; 6927 6928 if (mddev->layout != info->layout) { 6929 /* Change layout 6930 * we don't need to do anything at the md level, the 6931 * personality will take care of it all. 6932 */ 6933 if (mddev->pers->check_reshape == NULL) 6934 return -EINVAL; 6935 else { 6936 mddev->new_layout = info->layout; 6937 rv = mddev->pers->check_reshape(mddev); 6938 if (rv) 6939 mddev->new_layout = mddev->layout; 6940 return rv; 6941 } 6942 } 6943 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6944 rv = update_size(mddev, (sector_t)info->size * 2); 6945 6946 if (mddev->raid_disks != info->raid_disks) 6947 rv = update_raid_disks(mddev, info->raid_disks); 6948 6949 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 6950 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 6951 rv = -EINVAL; 6952 goto err; 6953 } 6954 if (mddev->recovery || mddev->sync_thread) { 6955 rv = -EBUSY; 6956 goto err; 6957 } 6958 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 6959 struct bitmap *bitmap; 6960 /* add the bitmap */ 6961 if (mddev->bitmap) { 6962 rv = -EEXIST; 6963 goto err; 6964 } 6965 if (mddev->bitmap_info.default_offset == 0) { 6966 rv = -EINVAL; 6967 goto err; 6968 } 6969 mddev->bitmap_info.offset = 6970 mddev->bitmap_info.default_offset; 6971 mddev->bitmap_info.space = 6972 mddev->bitmap_info.default_space; 6973 bitmap = md_bitmap_create(mddev, -1); 6974 mddev_suspend(mddev); 6975 if (!IS_ERR(bitmap)) { 6976 mddev->bitmap = bitmap; 6977 rv = md_bitmap_load(mddev); 6978 } else 6979 rv = PTR_ERR(bitmap); 6980 if (rv) 6981 md_bitmap_destroy(mddev); 6982 mddev_resume(mddev); 6983 } else { 6984 /* remove the bitmap */ 6985 if (!mddev->bitmap) { 6986 rv = -ENOENT; 6987 goto err; 6988 } 6989 if (mddev->bitmap->storage.file) { 6990 rv = -EINVAL; 6991 goto err; 6992 } 6993 if (mddev->bitmap_info.nodes) { 6994 /* hold PW on all the bitmap lock */ 6995 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { 6996 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 6997 rv = -EPERM; 6998 md_cluster_ops->unlock_all_bitmaps(mddev); 6999 goto err; 7000 } 7001 7002 mddev->bitmap_info.nodes = 0; 7003 md_cluster_ops->leave(mddev); 7004 } 7005 mddev_suspend(mddev); 7006 md_bitmap_destroy(mddev); 7007 mddev_resume(mddev); 7008 mddev->bitmap_info.offset = 0; 7009 } 7010 } 7011 
md_update_sb(mddev, 1); 7012 return rv; 7013 err: 7014 return rv; 7015 } 7016 7017 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 7018 { 7019 struct md_rdev *rdev; 7020 int err = 0; 7021 7022 if (mddev->pers == NULL) 7023 return -ENODEV; 7024 7025 rcu_read_lock(); 7026 rdev = md_find_rdev_rcu(mddev, dev); 7027 if (!rdev) 7028 err = -ENODEV; 7029 else { 7030 md_error(mddev, rdev); 7031 if (!test_bit(Faulty, &rdev->flags)) 7032 err = -EBUSY; 7033 } 7034 rcu_read_unlock(); 7035 return err; 7036 } 7037 7038 /* 7039 * We have a problem here : there is no easy way to give a CHS 7040 * virtual geometry. We currently pretend that we have a 2 heads 7041 * 4 sectors (with a BIG number of cylinders...). This drives 7042 * dosfs just mad... ;-) 7043 */ 7044 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 7045 { 7046 struct mddev *mddev = bdev->bd_disk->private_data; 7047 7048 geo->heads = 2; 7049 geo->sectors = 4; 7050 geo->cylinders = mddev->array_sectors / 8; 7051 return 0; 7052 } 7053 7054 static inline bool md_ioctl_valid(unsigned int cmd) 7055 { 7056 switch (cmd) { 7057 case ADD_NEW_DISK: 7058 case BLKROSET: 7059 case GET_ARRAY_INFO: 7060 case GET_BITMAP_FILE: 7061 case GET_DISK_INFO: 7062 case HOT_ADD_DISK: 7063 case HOT_REMOVE_DISK: 7064 case RAID_AUTORUN: 7065 case RAID_VERSION: 7066 case RESTART_ARRAY_RW: 7067 case RUN_ARRAY: 7068 case SET_ARRAY_INFO: 7069 case SET_BITMAP_FILE: 7070 case SET_DISK_FAULTY: 7071 case STOP_ARRAY: 7072 case STOP_ARRAY_RO: 7073 case CLUSTERED_DISK_NACK: 7074 return true; 7075 default: 7076 return false; 7077 } 7078 } 7079 7080 static int md_ioctl(struct block_device *bdev, fmode_t mode, 7081 unsigned int cmd, unsigned long arg) 7082 { 7083 int err = 0; 7084 void __user *argp = (void __user *)arg; 7085 struct mddev *mddev = NULL; 7086 int ro; 7087 bool did_set_md_closing = false; 7088 7089 if (!md_ioctl_valid(cmd)) 7090 return -ENOTTY; 7091 7092 switch (cmd) { 7093 case RAID_VERSION: 7094 case GET_ARRAY_INFO: 7095 case GET_DISK_INFO: 7096 break; 7097 default: 7098 if (!capable(CAP_SYS_ADMIN)) 7099 return -EACCES; 7100 } 7101 7102 /* 7103 * Commands dealing with the RAID driver but not any 7104 * particular array: 7105 */ 7106 switch (cmd) { 7107 case RAID_VERSION: 7108 err = get_version(argp); 7109 goto out; 7110 7111 #ifndef MODULE 7112 case RAID_AUTORUN: 7113 err = 0; 7114 autostart_arrays(arg); 7115 goto out; 7116 #endif 7117 default:; 7118 } 7119 7120 /* 7121 * Commands creating/starting a new array: 7122 */ 7123 7124 mddev = bdev->bd_disk->private_data; 7125 7126 if (!mddev) { 7127 BUG(); 7128 goto out; 7129 } 7130 7131 /* Some actions do not require the mutex */ 7132 switch (cmd) { 7133 case GET_ARRAY_INFO: 7134 if (!mddev->raid_disks && !mddev->external) 7135 err = -ENODEV; 7136 else 7137 err = get_array_info(mddev, argp); 7138 goto out; 7139 7140 case GET_DISK_INFO: 7141 if (!mddev->raid_disks && !mddev->external) 7142 err = -ENODEV; 7143 else 7144 err = get_disk_info(mddev, argp); 7145 goto out; 7146 7147 case SET_DISK_FAULTY: 7148 err = set_disk_faulty(mddev, new_decode_dev(arg)); 7149 goto out; 7150 7151 case GET_BITMAP_FILE: 7152 err = get_bitmap_file(mddev, argp); 7153 goto out; 7154 7155 } 7156 7157 if (cmd == ADD_NEW_DISK) 7158 /* need to ensure md_delayed_delete() has completed */ 7159 flush_workqueue(md_misc_wq); 7160 7161 if (cmd == HOT_REMOVE_DISK) 7162 /* need to ensure recovery thread has run */ 7163 wait_event_interruptible_timeout(mddev->sb_wait, 7164 !test_bit(MD_RECOVERY_NEEDED, 7165 &mddev->recovery),
7166 msecs_to_jiffies(5000)); 7167 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 7168 /* Need to flush page cache, and ensure no-one else opens 7169 * and writes 7170 */ 7171 mutex_lock(&mddev->open_mutex); 7172 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 7173 mutex_unlock(&mddev->open_mutex); 7174 err = -EBUSY; 7175 goto out; 7176 } 7177 WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); 7178 set_bit(MD_CLOSING, &mddev->flags); 7179 did_set_md_closing = true; 7180 mutex_unlock(&mddev->open_mutex); 7181 sync_blockdev(bdev); 7182 } 7183 err = mddev_lock(mddev); 7184 if (err) { 7185 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 7186 err, cmd); 7187 goto out; 7188 } 7189 7190 if (cmd == SET_ARRAY_INFO) { 7191 mdu_array_info_t info; 7192 if (!arg) 7193 memset(&info, 0, sizeof(info)); 7194 else if (copy_from_user(&info, argp, sizeof(info))) { 7195 err = -EFAULT; 7196 goto unlock; 7197 } 7198 if (mddev->pers) { 7199 err = update_array_info(mddev, &info); 7200 if (err) { 7201 pr_warn("md: couldn't update array info. %d\n", err); 7202 goto unlock; 7203 } 7204 goto unlock; 7205 } 7206 if (!list_empty(&mddev->disks)) { 7207 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 7208 err = -EBUSY; 7209 goto unlock; 7210 } 7211 if (mddev->raid_disks) { 7212 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 7213 err = -EBUSY; 7214 goto unlock; 7215 } 7216 err = set_array_info(mddev, &info); 7217 if (err) { 7218 pr_warn("md: couldn't set array info. %d\n", err); 7219 goto unlock; 7220 } 7221 goto unlock; 7222 } 7223 7224 /* 7225 * Commands querying/configuring an existing array: 7226 */ 7227 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 7228 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 7229 if ((!mddev->raid_disks && !mddev->external) 7230 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 7231 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 7232 && cmd != GET_BITMAP_FILE) { 7233 err = -ENODEV; 7234 goto unlock; 7235 } 7236 7237 /* 7238 * Commands even a read-only array can execute: 7239 */ 7240 switch (cmd) { 7241 case RESTART_ARRAY_RW: 7242 err = restart_array(mddev); 7243 goto unlock; 7244 7245 case STOP_ARRAY: 7246 err = do_md_stop(mddev, 0, bdev); 7247 goto unlock; 7248 7249 case STOP_ARRAY_RO: 7250 err = md_set_readonly(mddev, bdev); 7251 goto unlock; 7252 7253 case HOT_REMOVE_DISK: 7254 err = hot_remove_disk(mddev, new_decode_dev(arg)); 7255 goto unlock; 7256 7257 case ADD_NEW_DISK: 7258 /* We can support ADD_NEW_DISK on read-only arrays 7259 * only if we are re-adding a preexisting device. 7260 * So require mddev->pers and MD_DISK_SYNC. 7261 */ 7262 if (mddev->pers) { 7263 mdu_disk_info_t info; 7264 if (copy_from_user(&info, argp, sizeof(info))) 7265 err = -EFAULT; 7266 else if (!(info.state & (1<<MD_DISK_SYNC))) 7267 /* Need to clear read-only for this */ 7268 break; 7269 else 7270 err = add_new_disk(mddev, &info); 7271 goto unlock; 7272 } 7273 break; 7274 7275 case BLKROSET: 7276 if (get_user(ro, (int __user *)(arg))) { 7277 err = -EFAULT; 7278 goto unlock; 7279 } 7280 err = -EINVAL; 7281 7282 /* if the bdev is going readonly the value of mddev->ro 7283 * does not matter, no writes are coming 7284 */ 7285 if (ro) 7286 goto unlock; 7287 7288 /* are we already prepared for writes?
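 * (mddev->ro values: 0 = read-write, 1 = read-only,
 * 2 = auto-read-only, i.e. switch to read-write on the first write)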
*/ 7289 if (mddev->ro != 1) 7290 goto unlock; 7291 7292 /* transitioning to readauto need only happen for 7293 * arrays that call md_write_start 7294 */ 7295 if (mddev->pers) { 7296 err = restart_array(mddev); 7297 if (err == 0) { 7298 mddev->ro = 2; 7299 set_disk_ro(mddev->gendisk, 0); 7300 } 7301 } 7302 goto unlock; 7303 } 7304 7305 /* 7306 * The remaining ioctls are changing the state of the 7307 * superblock, so we do not allow them on read-only arrays. 7308 */ 7309 if (mddev->ro && mddev->pers) { 7310 if (mddev->ro == 2) { 7311 mddev->ro = 0; 7312 sysfs_notify_dirent_safe(mddev->sysfs_state); 7313 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7314 /* mddev_unlock will wake thread */ 7315 /* If a device failed while we were read-only, we 7316 * need to make sure the metadata is updated now. 7317 */ 7318 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 7319 mddev_unlock(mddev); 7320 wait_event(mddev->sb_wait, 7321 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 7322 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 7323 mddev_lock_nointr(mddev); 7324 } 7325 } else { 7326 err = -EROFS; 7327 goto unlock; 7328 } 7329 } 7330 7331 switch (cmd) { 7332 case ADD_NEW_DISK: 7333 { 7334 mdu_disk_info_t info; 7335 if (copy_from_user(&info, argp, sizeof(info))) 7336 err = -EFAULT; 7337 else 7338 err = add_new_disk(mddev, &info); 7339 goto unlock; 7340 } 7341 7342 case CLUSTERED_DISK_NACK: 7343 if (mddev_is_clustered(mddev)) 7344 md_cluster_ops->new_disk_ack(mddev, false); 7345 else 7346 err = -EINVAL; 7347 goto unlock; 7348 7349 case HOT_ADD_DISK: 7350 err = hot_add_disk(mddev, new_decode_dev(arg)); 7351 goto unlock; 7352 7353 case RUN_ARRAY: 7354 err = do_md_run(mddev); 7355 goto unlock; 7356 7357 case SET_BITMAP_FILE: 7358 err = set_bitmap_file(mddev, (int)arg); 7359 goto unlock; 7360 7361 default: 7362 err = -EINVAL; 7363 goto unlock; 7364 } 7365 7366 unlock: 7367 if (mddev->hold_active == UNTIL_IOCTL && 7368 err != -EINVAL) 7369 mddev->hold_active = 0; 7370 mddev_unlock(mddev); 7371 out: 7372 if(did_set_md_closing) 7373 clear_bit(MD_CLOSING, &mddev->flags); 7374 return err; 7375 } 7376 #ifdef CONFIG_COMPAT 7377 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 7378 unsigned int cmd, unsigned long arg) 7379 { 7380 switch (cmd) { 7381 case HOT_REMOVE_DISK: 7382 case HOT_ADD_DISK: 7383 case SET_DISK_FAULTY: 7384 case SET_BITMAP_FILE: 7385 /* These take in integer arg, do not convert */ 7386 break; 7387 default: 7388 arg = (unsigned long)compat_ptr(arg); 7389 break; 7390 } 7391 7392 return md_ioctl(bdev, mode, cmd, arg); 7393 } 7394 #endif /* CONFIG_COMPAT */ 7395 7396 static int md_open(struct block_device *bdev, fmode_t mode) 7397 { 7398 /* 7399 * Succeed if we can lock the mddev, which confirms that 7400 * it isn't being stopped right now. 7401 */ 7402 struct mddev *mddev = mddev_find(bdev->bd_dev); 7403 int err; 7404 7405 if (!mddev) 7406 return -ENODEV; 7407 7408 if (mddev->gendisk != bdev->bd_disk) { 7409 /* we are racing with mddev_put which is discarding this 7410 * bd_disk. 
7411 */ 7412 mddev_put(mddev); 7413 /* Wait until bdev->bd_disk is definitely gone */ 7414 flush_workqueue(md_misc_wq); 7415 /* Then retry the open from the top */ 7416 return -ERESTARTSYS; 7417 } 7418 BUG_ON(mddev != bdev->bd_disk->private_data); 7419 7420 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 7421 goto out; 7422 7423 if (test_bit(MD_CLOSING, &mddev->flags)) { 7424 mutex_unlock(&mddev->open_mutex); 7425 err = -ENODEV; 7426 goto out; 7427 } 7428 7429 err = 0; 7430 atomic_inc(&mddev->openers); 7431 mutex_unlock(&mddev->open_mutex); 7432 7433 check_disk_change(bdev); 7434 out: 7435 if (err) 7436 mddev_put(mddev); 7437 return err; 7438 } 7439 7440 static void md_release(struct gendisk *disk, fmode_t mode) 7441 { 7442 struct mddev *mddev = disk->private_data; 7443 7444 BUG_ON(!mddev); 7445 atomic_dec(&mddev->openers); 7446 mddev_put(mddev); 7447 } 7448 7449 static int md_media_changed(struct gendisk *disk) 7450 { 7451 struct mddev *mddev = disk->private_data; 7452 7453 return mddev->changed; 7454 } 7455 7456 static int md_revalidate(struct gendisk *disk) 7457 { 7458 struct mddev *mddev = disk->private_data; 7459 7460 mddev->changed = 0; 7461 return 0; 7462 } 7463 static const struct block_device_operations md_fops = 7464 { 7465 .owner = THIS_MODULE, 7466 .open = md_open, 7467 .release = md_release, 7468 .ioctl = md_ioctl, 7469 #ifdef CONFIG_COMPAT 7470 .compat_ioctl = md_compat_ioctl, 7471 #endif 7472 .getgeo = md_getgeo, 7473 .media_changed = md_media_changed, 7474 .revalidate_disk= md_revalidate, 7475 }; 7476 7477 static int md_thread(void *arg) 7478 { 7479 struct md_thread *thread = arg; 7480 7481 /* 7482 * md_thread is a 'system-thread', its priority should be very 7483 * high. We avoid resource deadlocks individually in each 7484 * raid personality. (RAID5 does preallocation) We also use RR and 7485 * the very same RT priority as kswapd, thus we will never get 7486 * into a priority inversion deadlock. 7487 * 7488 * we definitely have to have equal or higher priority than 7489 * bdflush, otherwise bdflush will deadlock if there are too 7490 * many dirty RAID5 blocks. 7491 */ 7492 7493 allow_signal(SIGKILL); 7494 while (!kthread_should_stop()) { 7495 7496 /* We need to wait INTERRUPTIBLE so that 7497 * we don't add to the load-average.
7498 * That means we need to be sure no signals are 7499 * pending 7500 */ 7501 if (signal_pending(current)) 7502 flush_signals(current); 7503 7504 wait_event_interruptible_timeout 7505 (thread->wqueue, 7506 test_bit(THREAD_WAKEUP, &thread->flags) 7507 || kthread_should_stop() || kthread_should_park(), 7508 thread->timeout); 7509 7510 clear_bit(THREAD_WAKEUP, &thread->flags); 7511 if (kthread_should_park()) 7512 kthread_parkme(); 7513 if (!kthread_should_stop()) 7514 thread->run(thread); 7515 } 7516 7517 return 0; 7518 } 7519 7520 void md_wakeup_thread(struct md_thread *thread) 7521 { 7522 if (thread) { 7523 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7524 set_bit(THREAD_WAKEUP, &thread->flags); 7525 wake_up(&thread->wqueue); 7526 } 7527 } 7528 EXPORT_SYMBOL(md_wakeup_thread); 7529 7530 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7531 struct mddev *mddev, const char *name) 7532 { 7533 struct md_thread *thread; 7534 7535 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7536 if (!thread) 7537 return NULL; 7538 7539 init_waitqueue_head(&thread->wqueue); 7540 7541 thread->run = run; 7542 thread->mddev = mddev; 7543 thread->timeout = MAX_SCHEDULE_TIMEOUT; 7544 thread->tsk = kthread_run(md_thread, thread, 7545 "%s_%s", 7546 mdname(thread->mddev), 7547 name); 7548 if (IS_ERR(thread->tsk)) { 7549 kfree(thread); 7550 return NULL; 7551 } 7552 return thread; 7553 } 7554 EXPORT_SYMBOL(md_register_thread); 7555 7556 void md_unregister_thread(struct md_thread **threadp) 7557 { 7558 struct md_thread *thread = *threadp; 7559 if (!thread) 7560 return; 7561 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 7562 /* Locking ensures that mddev_unlock does not wake_up a 7563 * non-existent thread 7564 */ 7565 spin_lock(&pers_lock); 7566 *threadp = NULL; 7567 spin_unlock(&pers_lock); 7568 7569 kthread_stop(thread->tsk); 7570 kfree(thread); 7571 } 7572 EXPORT_SYMBOL(md_unregister_thread); 7573 7574 void md_error(struct mddev *mddev, struct md_rdev *rdev) 7575 { 7576 if (!rdev || test_bit(Faulty, &rdev->flags)) 7577 return; 7578 7579 if (!mddev->pers || !mddev->pers->error_handler) 7580 return; 7581 mddev->pers->error_handler(mddev,rdev); 7582 if (mddev->degraded) 7583 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7584 sysfs_notify_dirent_safe(rdev->sysfs_state); 7585 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7586 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7587 md_wakeup_thread(mddev->thread); 7588 if (mddev->event_work.func) 7589 queue_work(md_misc_wq, &mddev->event_work); 7590 md_new_event(mddev); 7591 } 7592 EXPORT_SYMBOL(md_error); 7593 7594 /* seq_file implementation /proc/mdstat */ 7595 7596 static void status_unused(struct seq_file *seq) 7597 { 7598 int i = 0; 7599 struct md_rdev *rdev; 7600 7601 seq_printf(seq, "unused devices: "); 7602 7603 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 7604 char b[BDEVNAME_SIZE]; 7605 i++; 7606 seq_printf(seq, "%s ", 7607 bdevname(rdev->bdev,b)); 7608 } 7609 if (!i) 7610 seq_printf(seq, "<none>"); 7611 7612 seq_printf(seq, "\n"); 7613 } 7614 7615 static int status_resync(struct seq_file *seq, struct mddev *mddev) 7616 { 7617 sector_t max_sectors, resync, res; 7618 unsigned long dt, db; 7619 sector_t rt; 7620 int scale; 7621 unsigned int per_milli; 7622 7623 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 7624 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7625 max_sectors = mddev->resync_max_sectors; 7626 else 7627 max_sectors = mddev->dev_sectors; 7628 7629 resync = 
mddev->curr_resync; 7630 if (resync <= 3) { 7631 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7632 /* Still cleaning up */ 7633 resync = max_sectors; 7634 } else if (resync > max_sectors) 7635 resync = max_sectors; 7636 else 7637 resync -= atomic_read(&mddev->recovery_active); 7638 7639 if (resync == 0) { 7640 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 7641 struct md_rdev *rdev; 7642 7643 rdev_for_each(rdev, mddev) 7644 if (rdev->raid_disk >= 0 && 7645 !test_bit(Faulty, &rdev->flags) && 7646 rdev->recovery_offset != MaxSector && 7647 rdev->recovery_offset) { 7648 seq_printf(seq, "\trecover=REMOTE"); 7649 return 1; 7650 } 7651 if (mddev->reshape_position != MaxSector) 7652 seq_printf(seq, "\treshape=REMOTE"); 7653 else 7654 seq_printf(seq, "\tresync=REMOTE"); 7655 return 1; 7656 } 7657 if (mddev->recovery_cp < MaxSector) { 7658 seq_printf(seq, "\tresync=PENDING"); 7659 return 1; 7660 } 7661 return 0; 7662 } 7663 if (resync < 3) { 7664 seq_printf(seq, "\tresync=DELAYED"); 7665 return 1; 7666 } 7667 7668 WARN_ON(max_sectors == 0); 7669 /* Pick 'scale' such that (resync>>scale)*1000 will fit 7670 * in a sector_t, and (max_sectors>>scale) will fit in a 7671 * u32, as those are the requirements for sector_div. 7672 * Thus 'scale' must be at least 10 7673 */ 7674 scale = 10; 7675 if (sizeof(sector_t) > sizeof(unsigned long)) { 7676 while ( max_sectors/2 > (1ULL<<(scale+32))) 7677 scale++; 7678 } 7679 res = (resync>>scale)*1000; 7680 sector_div(res, (u32)((max_sectors>>scale)+1)); 7681 7682 per_milli = res; 7683 { 7684 int i, x = per_milli/50, y = 20-x; 7685 seq_printf(seq, "["); 7686 for (i = 0; i < x; i++) 7687 seq_printf(seq, "="); 7688 seq_printf(seq, ">"); 7689 for (i = 0; i < y; i++) 7690 seq_printf(seq, "."); 7691 seq_printf(seq, "] "); 7692 } 7693 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 7694 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 7695 "reshape" : 7696 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 7697 "check" : 7698 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 7699 "resync" : "recovery"))), 7700 per_milli/10, per_milli % 10, 7701 (unsigned long long) resync/2, 7702 (unsigned long long) max_sectors/2); 7703 7704 /* 7705 * dt: time from mark until now 7706 * db: blocks written from mark until now 7707 * rt: remaining time 7708 * 7709 * rt is a sector_t, so could be 32bit or 64bit. 7710 * So we divide before multiply in case it is 32bit and close 7711 * to the limit. 7712 * We scale the divisor (db) by 32 to avoid losing precision 7713 * near the end of resync when the number of remaining sectors 7714 * is close to 'db'. 7715 * We then divide rt by 32 after multiplying by db to compensate. 7716 * The '+1' avoids division by zero if db is very small. 
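 *
 * A worked example with illustrative numbers: with 1000000 sectors
 * remaining and db = 20480 sectors moved in dt = 10 seconds, the code
 * computes (1000000 / (20480/32 + 1)) * 10 >> 5 = (1560 * 10) >> 5
 * ~= 487 seconds, which matches the direct estimate of
 * 1000000 / (20480/10) ~= 488 seconds.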
7717 */ 7718 dt = ((jiffies - mddev->resync_mark) / HZ); 7719 if (!dt) dt++; 7720 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 7721 - mddev->resync_mark_cnt; 7722 7723 rt = max_sectors - resync; /* number of remaining sectors */ 7724 sector_div(rt, db/32+1); 7725 rt *= dt; 7726 rt >>= 5; 7727 7728 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 7729 ((unsigned long)rt % 60)/6); 7730 7731 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 7732 return 1; 7733 } 7734 7735 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 7736 { 7737 struct list_head *tmp; 7738 loff_t l = *pos; 7739 struct mddev *mddev; 7740 7741 if (l >= 0x10000) 7742 return NULL; 7743 if (!l--) 7744 /* header */ 7745 return (void*)1; 7746 7747 spin_lock(&all_mddevs_lock); 7748 list_for_each(tmp,&all_mddevs) 7749 if (!l--) { 7750 mddev = list_entry(tmp, struct mddev, all_mddevs); 7751 mddev_get(mddev); 7752 spin_unlock(&all_mddevs_lock); 7753 return mddev; 7754 } 7755 spin_unlock(&all_mddevs_lock); 7756 if (!l--) 7757 return (void*)2;/* tail */ 7758 return NULL; 7759 } 7760 7761 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 7762 { 7763 struct list_head *tmp; 7764 struct mddev *next_mddev, *mddev = v; 7765 7766 ++*pos; 7767 if (v == (void*)2) 7768 return NULL; 7769 7770 spin_lock(&all_mddevs_lock); 7771 if (v == (void*)1) 7772 tmp = all_mddevs.next; 7773 else 7774 tmp = mddev->all_mddevs.next; 7775 if (tmp != &all_mddevs) 7776 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 7777 else { 7778 next_mddev = (void*)2; 7779 *pos = 0x10000; 7780 } 7781 spin_unlock(&all_mddevs_lock); 7782 7783 if (v != (void*)1) 7784 mddev_put(mddev); 7785 return next_mddev; 7786 7787 } 7788 7789 static void md_seq_stop(struct seq_file *seq, void *v) 7790 { 7791 struct mddev *mddev = v; 7792 7793 if (mddev && v != (void*)1 && v != (void*)2) 7794 mddev_put(mddev); 7795 } 7796 7797 static int md_seq_show(struct seq_file *seq, void *v) 7798 { 7799 struct mddev *mddev = v; 7800 sector_t sectors; 7801 struct md_rdev *rdev; 7802 7803 if (v == (void*)1) { 7804 struct md_personality *pers; 7805 seq_printf(seq, "Personalities : "); 7806 spin_lock(&pers_lock); 7807 list_for_each_entry(pers, &pers_list, list) 7808 seq_printf(seq, "[%s] ", pers->name); 7809 7810 spin_unlock(&pers_lock); 7811 seq_printf(seq, "\n"); 7812 seq->poll_event = atomic_read(&md_event_count); 7813 return 0; 7814 } 7815 if (v == (void*)2) { 7816 status_unused(seq); 7817 return 0; 7818 } 7819 7820 spin_lock(&mddev->lock); 7821 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 7822 seq_printf(seq, "%s : %sactive", mdname(mddev), 7823 mddev->pers ? 
"" : "in"); 7824 if (mddev->pers) { 7825 if (mddev->ro==1) 7826 seq_printf(seq, " (read-only)"); 7827 if (mddev->ro==2) 7828 seq_printf(seq, " (auto-read-only)"); 7829 seq_printf(seq, " %s", mddev->pers->name); 7830 } 7831 7832 sectors = 0; 7833 rcu_read_lock(); 7834 rdev_for_each_rcu(rdev, mddev) { 7835 char b[BDEVNAME_SIZE]; 7836 seq_printf(seq, " %s[%d]", 7837 bdevname(rdev->bdev,b), rdev->desc_nr); 7838 if (test_bit(WriteMostly, &rdev->flags)) 7839 seq_printf(seq, "(W)"); 7840 if (test_bit(Journal, &rdev->flags)) 7841 seq_printf(seq, "(J)"); 7842 if (test_bit(Faulty, &rdev->flags)) { 7843 seq_printf(seq, "(F)"); 7844 continue; 7845 } 7846 if (rdev->raid_disk < 0) 7847 seq_printf(seq, "(S)"); /* spare */ 7848 if (test_bit(Replacement, &rdev->flags)) 7849 seq_printf(seq, "(R)"); 7850 sectors += rdev->sectors; 7851 } 7852 rcu_read_unlock(); 7853 7854 if (!list_empty(&mddev->disks)) { 7855 if (mddev->pers) 7856 seq_printf(seq, "\n %llu blocks", 7857 (unsigned long long) 7858 mddev->array_sectors / 2); 7859 else 7860 seq_printf(seq, "\n %llu blocks", 7861 (unsigned long long)sectors / 2); 7862 } 7863 if (mddev->persistent) { 7864 if (mddev->major_version != 0 || 7865 mddev->minor_version != 90) { 7866 seq_printf(seq," super %d.%d", 7867 mddev->major_version, 7868 mddev->minor_version); 7869 } 7870 } else if (mddev->external) 7871 seq_printf(seq, " super external:%s", 7872 mddev->metadata_type); 7873 else 7874 seq_printf(seq, " super non-persistent"); 7875 7876 if (mddev->pers) { 7877 mddev->pers->status(seq, mddev); 7878 seq_printf(seq, "\n "); 7879 if (mddev->pers->sync_request) { 7880 if (status_resync(seq, mddev)) 7881 seq_printf(seq, "\n "); 7882 } 7883 } else 7884 seq_printf(seq, "\n "); 7885 7886 md_bitmap_status(seq, mddev->bitmap); 7887 7888 seq_printf(seq, "\n"); 7889 } 7890 spin_unlock(&mddev->lock); 7891 7892 return 0; 7893 } 7894 7895 static const struct seq_operations md_seq_ops = { 7896 .start = md_seq_start, 7897 .next = md_seq_next, 7898 .stop = md_seq_stop, 7899 .show = md_seq_show, 7900 }; 7901 7902 static int md_seq_open(struct inode *inode, struct file *file) 7903 { 7904 struct seq_file *seq; 7905 int error; 7906 7907 error = seq_open(file, &md_seq_ops); 7908 if (error) 7909 return error; 7910 7911 seq = file->private_data; 7912 seq->poll_event = atomic_read(&md_event_count); 7913 return error; 7914 } 7915 7916 static int md_unloading; 7917 static __poll_t mdstat_poll(struct file *filp, poll_table *wait) 7918 { 7919 struct seq_file *seq = filp->private_data; 7920 __poll_t mask; 7921 7922 if (md_unloading) 7923 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 7924 poll_wait(filp, &md_event_waiters, wait); 7925 7926 /* always allow read */ 7927 mask = EPOLLIN | EPOLLRDNORM; 7928 7929 if (seq->poll_event != atomic_read(&md_event_count)) 7930 mask |= EPOLLERR | EPOLLPRI; 7931 return mask; 7932 } 7933 7934 static const struct file_operations md_seq_fops = { 7935 .owner = THIS_MODULE, 7936 .open = md_seq_open, 7937 .read = seq_read, 7938 .llseek = seq_lseek, 7939 .release = seq_release, 7940 .poll = mdstat_poll, 7941 }; 7942 7943 int register_md_personality(struct md_personality *p) 7944 { 7945 pr_debug("md: %s personality registered for level %d\n", 7946 p->name, p->level); 7947 spin_lock(&pers_lock); 7948 list_add_tail(&p->list, &pers_list); 7949 spin_unlock(&pers_lock); 7950 return 0; 7951 } 7952 EXPORT_SYMBOL(register_md_personality); 7953 7954 int unregister_md_personality(struct md_personality *p) 7955 { 7956 pr_debug("md: %s personality unregistered\n", p->name); 7957 
spin_lock(&pers_lock); 7958 list_del_init(&p->list); 7959 spin_unlock(&pers_lock); 7960 return 0; 7961 } 7962 EXPORT_SYMBOL(unregister_md_personality); 7963 7964 int register_md_cluster_operations(struct md_cluster_operations *ops, 7965 struct module *module) 7966 { 7967 int ret = 0; 7968 spin_lock(&pers_lock); 7969 if (md_cluster_ops != NULL) 7970 ret = -EALREADY; 7971 else { 7972 md_cluster_ops = ops; 7973 md_cluster_mod = module; 7974 } 7975 spin_unlock(&pers_lock); 7976 return ret; 7977 } 7978 EXPORT_SYMBOL(register_md_cluster_operations); 7979 7980 int unregister_md_cluster_operations(void) 7981 { 7982 spin_lock(&pers_lock); 7983 md_cluster_ops = NULL; 7984 spin_unlock(&pers_lock); 7985 return 0; 7986 } 7987 EXPORT_SYMBOL(unregister_md_cluster_operations); 7988 7989 int md_setup_cluster(struct mddev *mddev, int nodes) 7990 { 7991 if (!md_cluster_ops) 7992 request_module("md-cluster"); 7993 spin_lock(&pers_lock); 7994 /* ensure module won't be unloaded */ 7995 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 7996 pr_warn("can't find md-cluster module or get its reference.\n"); 7997 spin_unlock(&pers_lock); 7998 return -ENOENT; 7999 } 8000 spin_unlock(&pers_lock); 8001 8002 return md_cluster_ops->join(mddev, nodes); 8003 } 8004 8005 void md_cluster_stop(struct mddev *mddev) 8006 { 8007 if (!md_cluster_ops) 8008 return; 8009 md_cluster_ops->leave(mddev); 8010 module_put(md_cluster_mod); 8011 } 8012 8013 static int is_mddev_idle(struct mddev *mddev, int init) 8014 { 8015 struct md_rdev *rdev; 8016 int idle; 8017 int curr_events; 8018 8019 idle = 1; 8020 rcu_read_lock(); 8021 rdev_for_each_rcu(rdev, mddev) { 8022 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 8023 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - 8024 atomic_read(&disk->sync_io); 8025 /* sync IO will cause sync_io to increase before the disk_stats 8026 * as sync_io is counted when a request starts, and 8027 * disk_stats is counted when it completes. 8028 * So resync activity will cause curr_events to be smaller than 8029 * when there was no such activity. 8030 * non-sync IO will cause disk_stats to increase without 8031 * increasing sync_io so curr_events will (eventually) 8032 * be larger than it was before. Once it becomes 8033 * substantially larger, the test below will cause 8034 * the array to appear non-idle, and resync will slow 8035 * down. 8036 * If there is a lot of outstanding resync activity when 8037 * we set last_events to curr_events, then all that activity 8038 * completing might cause the array to appear non-idle 8039 * and resync will be slowed down even though there might 8040 * not have been non-resync activity. This will only 8041 * happen once though. 'last_events' will soon reflect 8042 * the state where there are few or no outstanding 8043 * resync requests, and further resync activity will 8044 * always make curr_events less than last_events. 8045 * 8046 */ 8047 if (init || curr_events - rdev->last_events > 64) { 8048 rdev->last_events = curr_events; 8049 idle = 0; 8050 } 8051 } 8052 rcu_read_unlock(); 8053 return idle; 8054 } 8055 8056 void md_done_sync(struct mddev *mddev, int blocks, int ok) 8057 { 8058 /* another "blocks" (512-byte) blocks have been synced */ 8059 atomic_sub(blocks, &mddev->recovery_active); 8060 wake_up(&mddev->recovery_wait); 8061 if (!ok) { 8062 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8063 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 8064 md_wakeup_thread(mddev->thread); 8065 // stop recovery, signal do_sync ....
8066 } 8067 } 8068 EXPORT_SYMBOL(md_done_sync); 8069 8070 /* md_write_start(mddev, bi) 8071 * If we need to update some array metadata (e.g. 'active' flag 8072 * in superblock) before writing, schedule a superblock update 8073 * and wait for it to complete. 8074 * A return value of 'false' means that the write wasn't recorded 8075 * and cannot proceed as the array is being suspended. 8076 */ 8077 bool md_write_start(struct mddev *mddev, struct bio *bi) 8078 { 8079 int did_change = 0; 8080 8081 if (bio_data_dir(bi) != WRITE) 8082 return true; 8083 8084 BUG_ON(mddev->ro == 1); 8085 if (mddev->ro == 2) { 8086 /* need to switch to read/write */ 8087 mddev->ro = 0; 8088 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8089 md_wakeup_thread(mddev->thread); 8090 md_wakeup_thread(mddev->sync_thread); 8091 did_change = 1; 8092 } 8093 rcu_read_lock(); 8094 percpu_ref_get(&mddev->writes_pending); 8095 smp_mb(); /* Match smp_mb in set_in_sync() */ 8096 if (mddev->safemode == 1) 8097 mddev->safemode = 0; 8098 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ 8099 if (mddev->in_sync || mddev->sync_checkers) { 8100 spin_lock(&mddev->lock); 8101 if (mddev->in_sync) { 8102 mddev->in_sync = 0; 8103 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8104 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8105 md_wakeup_thread(mddev->thread); 8106 did_change = 1; 8107 } 8108 spin_unlock(&mddev->lock); 8109 } 8110 rcu_read_unlock(); 8111 if (did_change) 8112 sysfs_notify_dirent_safe(mddev->sysfs_state); 8113 if (!mddev->has_superblocks) 8114 return true; 8115 wait_event(mddev->sb_wait, 8116 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || 8117 mddev->suspended); 8118 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 8119 percpu_ref_put(&mddev->writes_pending); 8120 return false; 8121 } 8122 return true; 8123 } 8124 EXPORT_SYMBOL(md_write_start); 8125 8126 /* md_write_inc can only be called when md_write_start() has 8127 * already been called at least once for the current request. 8128 * It increments the counter and is useful when a single request 8129 * is split into several parts. Each part causes an increment and 8130 * so needs a matching md_write_end(). 8131 * Unlike md_write_start(), it is safe to call md_write_inc() inside 8132 * a spinlocked region. 8133 */ 8134 void md_write_inc(struct mddev *mddev, struct bio *bi) 8135 { 8136 if (bio_data_dir(bi) != WRITE) 8137 return; 8138 WARN_ON_ONCE(mddev->in_sync || mddev->ro); 8139 percpu_ref_get(&mddev->writes_pending); 8140 } 8141 EXPORT_SYMBOL(md_write_inc); 8142 8143 void md_write_end(struct mddev *mddev) 8144 { 8145 percpu_ref_put(&mddev->writes_pending); 8146 8147 if (mddev->safemode == 2) 8148 md_wakeup_thread(mddev->thread); 8149 else if (mddev->safemode_delay) 8150 /* The roundup() ensures this only performs locking once 8151 * every ->safemode_delay jiffies 8152 */ 8153 mod_timer(&mddev->safemode_timer, 8154 roundup(jiffies, mddev->safemode_delay) + 8155 mddev->safemode_delay); 8156 } 8157 8158 EXPORT_SYMBOL(md_write_end); 8159 8160 /* md_allow_write(mddev) 8161 * Calling this ensures that the array is marked 'active' so that writes 8162 * may proceed without blocking. It is important to call this before 8163 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8164 * Must be called with mddev_lock held.
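 *
 * A minimal usage sketch (hypothetical caller; conf_alloc() is an assumed
 * helper, not part of this file):
 *
 *	mddev_lock_nointr(mddev);
 *	md_allow_write(mddev);
 *	conf = conf_alloc(mddev, GFP_KERNEL);	- safe: the allocation can no
 *						  longer block behind a 'clean'
 *						  superblock write
 *	mddev_unlock(mddev);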
8165 */ 8166 void md_allow_write(struct mddev *mddev) 8167 { 8168 if (!mddev->pers) 8169 return; 8170 if (mddev->ro) 8171 return; 8172 if (!mddev->pers->sync_request) 8173 return; 8174 8175 spin_lock(&mddev->lock); 8176 if (mddev->in_sync) { 8177 mddev->in_sync = 0; 8178 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8179 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8180 if (mddev->safemode_delay && 8181 mddev->safemode == 0) 8182 mddev->safemode = 1; 8183 spin_unlock(&mddev->lock); 8184 md_update_sb(mddev, 0); 8185 sysfs_notify_dirent_safe(mddev->sysfs_state); 8186 /* wait for the dirty state to be recorded in the metadata */ 8187 wait_event(mddev->sb_wait, 8188 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 8189 } else 8190 spin_unlock(&mddev->lock); 8191 } 8192 EXPORT_SYMBOL_GPL(md_allow_write); 8193 8194 #define SYNC_MARKS 10 8195 #define SYNC_MARK_STEP (3*HZ) 8196 #define UPDATE_FREQUENCY (5*60*HZ) 8197 void md_do_sync(struct md_thread *thread) 8198 { 8199 struct mddev *mddev = thread->mddev; 8200 struct mddev *mddev2; 8201 unsigned int currspeed = 0, 8202 window; 8203 sector_t max_sectors,j, io_sectors, recovery_done; 8204 unsigned long mark[SYNC_MARKS]; 8205 unsigned long update_time; 8206 sector_t mark_cnt[SYNC_MARKS]; 8207 int last_mark,m; 8208 struct list_head *tmp; 8209 sector_t last_check; 8210 int skipped = 0; 8211 struct md_rdev *rdev; 8212 char *desc, *action = NULL; 8213 struct blk_plug plug; 8214 int ret; 8215 8216 /* just in case thread restarts... */ 8217 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8218 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) 8219 return; 8220 if (mddev->ro) {/* never try to sync a read-only array */ 8221 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8222 return; 8223 } 8224 8225 if (mddev_is_clustered(mddev)) { 8226 ret = md_cluster_ops->resync_start(mddev); 8227 if (ret) 8228 goto skip; 8229 8230 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); 8231 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8232 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || 8233 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 8234 && ((unsigned long long)mddev->curr_resync_completed 8235 < (unsigned long long)mddev->resync_max_sectors)) 8236 goto skip; 8237 } 8238 8239 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8240 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 8241 desc = "data-check"; 8242 action = "check"; 8243 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8244 desc = "requested-resync"; 8245 action = "repair"; 8246 } else 8247 desc = "resync"; 8248 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8249 desc = "reshape"; 8250 else 8251 desc = "recovery"; 8252 8253 mddev->last_sync_action = action ?: desc; 8254 8255 /* we overload curr_resync somewhat here. 8256 * 0 == not engaged in resync at all 8257 * 2 == checking that there is no conflict with another sync 8258 * 1 == like 2, but have yielded to allow conflicting resync to 8259 * commence 8260 * other == active in resync - this many blocks 8261 * 8262 * Before starting a resync we must have set curr_resync to 8263 * 2, and then checked that every "conflicting" array has curr_resync 8264 * less than ours. When we find one that is the same or higher 8265 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 8266 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 8267 * This will mean we have to start checking from the beginning again.
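 *
 * Sketch of the yield protocol for two overlapping arrays A and B,
 * where A has the lower mddev address:
 *
 *	A: curr_resync = 2; sees B at 2, A < B  -> yields, curr_resync = 1
 *	B: curr_resync = 2; sees A at 1 (< 2)   -> proceeds with its resync
 *	A: waits on resync_wait until B finishes, then starts again from 2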
8268 * 8269 */ 8270 8271 do { 8272 int mddev2_minor = -1; 8273 mddev->curr_resync = 2; 8274 8275 try_again: 8276 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8277 goto skip; 8278 for_each_mddev(mddev2, tmp) { 8279 if (mddev2 == mddev) 8280 continue; 8281 if (!mddev->parallel_resync 8282 && mddev2->curr_resync 8283 && match_mddev_units(mddev, mddev2)) { 8284 DEFINE_WAIT(wq); 8285 if (mddev < mddev2 && mddev->curr_resync == 2) { 8286 /* arbitrarily yield */ 8287 mddev->curr_resync = 1; 8288 wake_up(&resync_wait); 8289 } 8290 if (mddev > mddev2 && mddev->curr_resync == 1) 8291 /* no need to wait here, we can wait the next 8292 * time 'round when curr_resync == 2 8293 */ 8294 continue; 8295 /* We need to wait 'interruptible' so as not to 8296 * contribute to the load average, and not to 8297 * be caught by 'softlockup' 8298 */ 8299 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 8300 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8301 mddev2->curr_resync >= mddev->curr_resync) { 8302 if (mddev2_minor != mddev2->md_minor) { 8303 mddev2_minor = mddev2->md_minor; 8304 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n", 8305 desc, mdname(mddev), 8306 mdname(mddev2)); 8307 } 8308 mddev_put(mddev2); 8309 if (signal_pending(current)) 8310 flush_signals(current); 8311 schedule(); 8312 finish_wait(&resync_wait, &wq); 8313 goto try_again; 8314 } 8315 finish_wait(&resync_wait, &wq); 8316 } 8317 } 8318 } while (mddev->curr_resync < 2); 8319 8320 j = 0; 8321 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8322 /* resync follows the size requested by the personality, 8323 * which defaults to physical size, but can be virtual size 8324 */ 8325 max_sectors = mddev->resync_max_sectors; 8326 atomic64_set(&mddev->resync_mismatches, 0); 8327 /* we don't use the checkpoint if there's a bitmap */ 8328 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8329 j = mddev->resync_min; 8330 else if (!mddev->bitmap) 8331 j = mddev->recovery_cp; 8332 8333 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 8334 max_sectors = mddev->resync_max_sectors; 8335 /* 8336 * If the original node aborts reshaping then we continue the 8337 * reshaping, so set j again to avoid restart reshape from the 8338 * first beginning 8339 */ 8340 if (mddev_is_clustered(mddev) && 8341 mddev->reshape_position != MaxSector) 8342 j = mddev->reshape_position; 8343 } else { 8344 /* recovery follows the physical size of devices */ 8345 max_sectors = mddev->dev_sectors; 8346 j = MaxSector; 8347 rcu_read_lock(); 8348 rdev_for_each_rcu(rdev, mddev) 8349 if (rdev->raid_disk >= 0 && 8350 !test_bit(Journal, &rdev->flags) && 8351 !test_bit(Faulty, &rdev->flags) && 8352 !test_bit(In_sync, &rdev->flags) && 8353 rdev->recovery_offset < j) 8354 j = rdev->recovery_offset; 8355 rcu_read_unlock(); 8356 8357 /* If there is a bitmap, we need to make sure all 8358 * writes that started before we added a spare 8359 * complete before we start doing a recovery. 8360 * Otherwise the write might complete and (via 8361 * bitmap_endwrite) set a bit in the bitmap after the 8362 * recovery has checked that bit and skipped that 8363 * region. 
8364 */ 8365 if (mddev->bitmap) { 8366 mddev->pers->quiesce(mddev, 1); 8367 mddev->pers->quiesce(mddev, 0); 8368 } 8369 } 8370 8371 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); 8372 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); 8373 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", 8374 speed_max(mddev), desc); 8375 8376 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 8377 8378 io_sectors = 0; 8379 for (m = 0; m < SYNC_MARKS; m++) { 8380 mark[m] = jiffies; 8381 mark_cnt[m] = io_sectors; 8382 } 8383 last_mark = 0; 8384 mddev->resync_mark = mark[last_mark]; 8385 mddev->resync_mark_cnt = mark_cnt[last_mark]; 8386 8387 /* 8388 * Tune reconstruction: 8389 */ 8390 window = 32*(PAGE_SIZE/512); 8391 pr_debug("md: using %dk window, over a total of %lluk.\n", 8392 window/2, (unsigned long long)max_sectors/2); 8393 8394 atomic_set(&mddev->recovery_active, 0); 8395 last_check = 0; 8396 8397 if (j>2) { 8398 pr_debug("md: resuming %s of %s from checkpoint.\n", 8399 desc, mdname(mddev)); 8400 mddev->curr_resync = j; 8401 } else 8402 mddev->curr_resync = 3; /* no longer delayed */ 8403 mddev->curr_resync_completed = j; 8404 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8405 md_new_event(mddev); 8406 update_time = jiffies; 8407 8408 blk_start_plug(&plug); 8409 while (j < max_sectors) { 8410 sector_t sectors; 8411 8412 skipped = 0; 8413 8414 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8415 ((mddev->curr_resync > mddev->curr_resync_completed && 8416 (mddev->curr_resync - mddev->curr_resync_completed) 8417 > (max_sectors >> 4)) || 8418 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 8419 (j - mddev->curr_resync_completed)*2 8420 >= mddev->resync_max - mddev->curr_resync_completed || 8421 mddev->curr_resync_completed > mddev->resync_max 8422 )) { 8423 /* time to update curr_resync_completed */ 8424 wait_event(mddev->recovery_wait, 8425 atomic_read(&mddev->recovery_active) == 0); 8426 mddev->curr_resync_completed = j; 8427 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 8428 j > mddev->recovery_cp) 8429 mddev->recovery_cp = j; 8430 update_time = jiffies; 8431 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8432 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8433 } 8434 8435 while (j >= mddev->resync_max && 8436 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8437 /* As this condition is controlled by user-space, 8438 * we can block indefinitely, so use '_interruptible' 8439 * to avoid triggering warnings. 8440 */ 8441 flush_signals(current); /* just in case */ 8442 wait_event_interruptible(mddev->recovery_wait, 8443 mddev->resync_max > j 8444 || test_bit(MD_RECOVERY_INTR, 8445 &mddev->recovery)); 8446 } 8447 8448 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8449 break; 8450 8451 sectors = mddev->pers->sync_request(mddev, j, &skipped); 8452 if (sectors == 0) { 8453 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8454 break; 8455 } 8456 8457 if (!skipped) { /* actual IO requested */ 8458 io_sectors += sectors; 8459 atomic_add(sectors, &mddev->recovery_active); 8460 } 8461 8462 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8463 break; 8464 8465 j += sectors; 8466 if (j > max_sectors) 8467 /* when skipping, extra large numbers can be returned. 
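 * (a personality may skip a large clean region in a single
 * sync_request() call, so j is clamped back to max_sectors here)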
*/ 8468 j = max_sectors; 8469 if (j > 2) 8470 mddev->curr_resync = j; 8471 mddev->curr_mark_cnt = io_sectors; 8472 if (last_check == 0) 8473 /* this is the earliest that rebuild will be 8474 * visible in /proc/mdstat 8475 */ 8476 md_new_event(mddev); 8477 8478 if (last_check + window > io_sectors || j == max_sectors) 8479 continue; 8480 8481 last_check = io_sectors; 8482 repeat: 8483 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 8484 /* step marks */ 8485 int next = (last_mark+1) % SYNC_MARKS; 8486 8487 mddev->resync_mark = mark[next]; 8488 mddev->resync_mark_cnt = mark_cnt[next]; 8489 mark[next] = jiffies; 8490 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 8491 last_mark = next; 8492 } 8493 8494 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8495 break; 8496 8497 /* 8498 * this loop exits only if we are slower than 8499 * the 'hard' speed limit, or the system was IO-idle for 8500 * a jiffy. 8501 * the system might be non-idle CPU-wise, but we only care 8502 * about not overloading the IO subsystem. (things like an 8503 * e2fsck being done on the RAID array should execute fast) 8504 */ 8505 cond_resched(); 8506 8507 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 8508 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 8509 /((jiffies-mddev->resync_mark)/HZ +1) +1; 8510 8511 if (currspeed > speed_min(mddev)) { 8512 if (currspeed > speed_max(mddev)) { 8513 msleep(500); 8514 goto repeat; 8515 } 8516 if (!is_mddev_idle(mddev, 0)) { 8517 /* 8518 * Give other IO more of a chance. 8519 * The faster the devices, the less we wait. 8520 */ 8521 wait_event(mddev->recovery_wait, 8522 !atomic_read(&mddev->recovery_active)); 8523 } 8524 } 8525 } 8526 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, 8527 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 8528 ?
"interrupted" : "done"); 8529 /* 8530 * this also signals 'finished resyncing' to md_stop 8531 */ 8532 blk_finish_plug(&plug); 8533 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 8534 8535 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8536 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8537 mddev->curr_resync > 3) { 8538 mddev->curr_resync_completed = mddev->curr_resync; 8539 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8540 } 8541 mddev->pers->sync_request(mddev, max_sectors, &skipped); 8542 8543 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 8544 mddev->curr_resync > 3) { 8545 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8546 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8547 if (mddev->curr_resync >= mddev->recovery_cp) { 8548 pr_debug("md: checkpointing %s of %s.\n", 8549 desc, mdname(mddev)); 8550 if (test_bit(MD_RECOVERY_ERROR, 8551 &mddev->recovery)) 8552 mddev->recovery_cp = 8553 mddev->curr_resync_completed; 8554 else 8555 mddev->recovery_cp = 8556 mddev->curr_resync; 8557 } 8558 } else 8559 mddev->recovery_cp = MaxSector; 8560 } else { 8561 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8562 mddev->curr_resync = MaxSector; 8563 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8564 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 8565 rcu_read_lock(); 8566 rdev_for_each_rcu(rdev, mddev) 8567 if (rdev->raid_disk >= 0 && 8568 mddev->delta_disks >= 0 && 8569 !test_bit(Journal, &rdev->flags) && 8570 !test_bit(Faulty, &rdev->flags) && 8571 !test_bit(In_sync, &rdev->flags) && 8572 rdev->recovery_offset < mddev->curr_resync) 8573 rdev->recovery_offset = mddev->curr_resync; 8574 rcu_read_unlock(); 8575 } 8576 } 8577 } 8578 skip: 8579 /* set CHANGE_PENDING here since maybe another update is needed, 8580 * so other nodes are informed. It should be harmless for normal 8581 * raid */ 8582 set_mask_bits(&mddev->sb_flags, 0, 8583 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 8584 8585 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8586 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8587 mddev->delta_disks > 0 && 8588 mddev->pers->finish_reshape && 8589 mddev->pers->size && 8590 mddev->queue) { 8591 mddev_lock_nointr(mddev); 8592 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 8593 mddev_unlock(mddev); 8594 if (!mddev_is_clustered(mddev)) { 8595 set_capacity(mddev->gendisk, mddev->array_sectors); 8596 revalidate_disk(mddev->gendisk); 8597 } 8598 } 8599 8600 spin_lock(&mddev->lock); 8601 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8602 /* We completed so min/max setting can be forgotten if used. 
*/ 8603 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8604 mddev->resync_min = 0; 8605 mddev->resync_max = MaxSector; 8606 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8607 mddev->resync_min = mddev->curr_resync_completed; 8608 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 8609 mddev->curr_resync = 0; 8610 spin_unlock(&mddev->lock); 8611 8612 wake_up(&resync_wait); 8613 md_wakeup_thread(mddev->thread); 8614 return; 8615 } 8616 EXPORT_SYMBOL_GPL(md_do_sync); 8617 8618 static int remove_and_add_spares(struct mddev *mddev, 8619 struct md_rdev *this) 8620 { 8621 struct md_rdev *rdev; 8622 int spares = 0; 8623 int removed = 0; 8624 bool remove_some = false; 8625 8626 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 8627 /* Mustn't remove devices when resync thread is running */ 8628 return 0; 8629 8630 rdev_for_each(rdev, mddev) { 8631 if ((this == NULL || rdev == this) && 8632 rdev->raid_disk >= 0 && 8633 !test_bit(Blocked, &rdev->flags) && 8634 test_bit(Faulty, &rdev->flags) && 8635 atomic_read(&rdev->nr_pending)==0) { 8636 /* Faulty non-Blocked devices with nr_pending == 0 8637 * never get nr_pending incremented, 8638 * never get Faulty cleared, and never get Blocked set. 8639 * So we can synchronize_rcu now rather than once per device 8640 */ 8641 remove_some = true; 8642 set_bit(RemoveSynchronized, &rdev->flags); 8643 } 8644 } 8645 8646 if (remove_some) 8647 synchronize_rcu(); 8648 rdev_for_each(rdev, mddev) { 8649 if ((this == NULL || rdev == this) && 8650 rdev->raid_disk >= 0 && 8651 !test_bit(Blocked, &rdev->flags) && 8652 ((test_bit(RemoveSynchronized, &rdev->flags) || 8653 (!test_bit(In_sync, &rdev->flags) && 8654 !test_bit(Journal, &rdev->flags))) && 8655 atomic_read(&rdev->nr_pending)==0)) { 8656 if (mddev->pers->hot_remove_disk( 8657 mddev, rdev) == 0) { 8658 sysfs_unlink_rdev(mddev, rdev); 8659 rdev->saved_raid_disk = rdev->raid_disk; 8660 rdev->raid_disk = -1; 8661 removed++; 8662 } 8663 } 8664 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags)) 8665 clear_bit(RemoveSynchronized, &rdev->flags); 8666 } 8667 8668 if (removed && mddev->kobj.sd) 8669 sysfs_notify(&mddev->kobj, NULL, "degraded"); 8670 8671 if (this && removed) 8672 goto no_add; 8673 8674 rdev_for_each(rdev, mddev) { 8675 if (this && this != rdev) 8676 continue; 8677 if (test_bit(Candidate, &rdev->flags)) 8678 continue; 8679 if (rdev->raid_disk >= 0 && 8680 !test_bit(In_sync, &rdev->flags) && 8681 !test_bit(Journal, &rdev->flags) && 8682 !test_bit(Faulty, &rdev->flags)) 8683 spares++; 8684 if (rdev->raid_disk >= 0) 8685 continue; 8686 if (test_bit(Faulty, &rdev->flags)) 8687 continue; 8688 if (!test_bit(Journal, &rdev->flags)) { 8689 if (mddev->ro && 8690 ! 
(rdev->saved_raid_disk >= 0 && 8691 !test_bit(Bitmap_sync, &rdev->flags))) 8692 continue; 8693 8694 rdev->recovery_offset = 0; 8695 } 8696 if (mddev->pers-> 8697 hot_add_disk(mddev, rdev) == 0) { 8698 if (sysfs_link_rdev(mddev, rdev)) 8699 /* failure here is OK */; 8700 if (!test_bit(Journal, &rdev->flags)) 8701 spares++; 8702 md_new_event(mddev); 8703 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8704 } 8705 } 8706 no_add: 8707 if (removed) 8708 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8709 return spares; 8710 } 8711 8712 static void md_start_sync(struct work_struct *ws) 8713 { 8714 struct mddev *mddev = container_of(ws, struct mddev, del_work); 8715 8716 mddev->sync_thread = md_register_thread(md_do_sync, 8717 mddev, 8718 "resync"); 8719 if (!mddev->sync_thread) { 8720 pr_warn("%s: could not start resync thread...\n", 8721 mdname(mddev)); 8722 /* leave the spares where they are, it shouldn't hurt */ 8723 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8724 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8725 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8726 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8727 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8728 wake_up(&resync_wait); 8729 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 8730 &mddev->recovery)) 8731 if (mddev->sysfs_action) 8732 sysfs_notify_dirent_safe(mddev->sysfs_action); 8733 } else 8734 md_wakeup_thread(mddev->sync_thread); 8735 sysfs_notify_dirent_safe(mddev->sysfs_action); 8736 md_new_event(mddev); 8737 } 8738 8739 /* 8740 * This routine is regularly called by all per-raid-array threads to 8741 * deal with generic issues like resync and super-block update. 8742 * Raid personalities that don't have a thread (linear/raid0) do not 8743 * need this as they never do any recovery or update the superblock. 8744 * 8745 * It does not do any resync itself, but rather "forks" off other threads 8746 * to do that as needed. 8747 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 8748 * "->recovery" and create a thread at ->sync_thread. 8749 * When the thread finishes it sets MD_RECOVERY_DONE 8750 * and wakes up this thread which will reap the thread and finish up. 8751 * This thread also removes any faulty devices (with nr_pending == 0). 8752 * 8753 * The overall approach is: 8754 * 1/ if the superblock needs updating, update it. 8755 * 2/ If a recovery thread is running, don't do anything else. 8756 * 3/ If recovery has finished, clean up, possibly marking spares active. 8757 * 4/ If there are any faulty devices, remove them. 8758 * 5/ If array is degraded, try to add spare devices 8759 * 6/ If array has spares or is not in-sync, start a resync thread. 8760 */ 8761 void md_check_recovery(struct mddev *mddev) 8762 { 8763 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { 8764 /* Write superblock - thread that called mddev_suspend() 8765 * holds reconfig_mutex for us.
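 * The smp_mb__after_atomic() below pairs with mddev_suspend(): we
 * publish MD_UPDATING_SB before re-checking MD_ALLOW_SB_UPDATE, so the
 * suspending thread either sees our update in progress and waits on
 * sb_wait, or we see that the flag was already cleared and skip the
 * write.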
8766 */ 8767 set_bit(MD_UPDATING_SB, &mddev->flags); 8768 smp_mb__after_atomic(); 8769 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) 8770 md_update_sb(mddev, 0); 8771 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); 8772 wake_up(&mddev->sb_wait); 8773 } 8774 8775 if (mddev->suspended) 8776 return; 8777 8778 if (mddev->bitmap) 8779 md_bitmap_daemon_work(mddev); 8780 8781 if (signal_pending(current)) { 8782 if (mddev->pers->sync_request && !mddev->external) { 8783 pr_debug("md: %s in immediate safe mode\n", 8784 mdname(mddev)); 8785 mddev->safemode = 2; 8786 } 8787 flush_signals(current); 8788 } 8789 8790 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 8791 return; 8792 if ( ! ( 8793 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || 8794 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 8795 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8796 (mddev->external == 0 && mddev->safemode == 1) || 8797 (mddev->safemode == 2 8798 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 8799 )) 8800 return; 8801 8802 if (mddev_trylock(mddev)) { 8803 int spares = 0; 8804 8805 if (!mddev->external && mddev->safemode == 1) 8806 mddev->safemode = 0; 8807 8808 if (mddev->ro) { 8809 struct md_rdev *rdev; 8810 if (!mddev->external && mddev->in_sync) 8811 /* 'Blocked' flag not needed as failed devices 8812 * will be recorded if array switched to read/write. 8813 * Leaving it set will prevent the device 8814 * from being removed. 8815 */ 8816 rdev_for_each(rdev, mddev) 8817 clear_bit(Blocked, &rdev->flags); 8818 /* On a read-only array we can: 8819 * - remove failed devices 8820 * - add already-in_sync devices if the array itself 8821 * is in-sync. 8822 * As we only add devices that are already in-sync, 8823 * we can activate the spares immediately. 8824 */ 8825 remove_and_add_spares(mddev, NULL); 8826 /* There is no thread, but we need to call 8827 * ->spare_active and clear saved_raid_disk 8828 */ 8829 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8830 md_reap_sync_thread(mddev); 8831 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8832 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8833 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8834 goto unlock; 8835 } 8836 8837 if (mddev_is_clustered(mddev)) { 8838 struct md_rdev *rdev; 8839 /* kick the device if another node issued a 8840 * remove disk. 8841 */ 8842 rdev_for_each(rdev, mddev) { 8843 if (test_and_clear_bit(ClusterRemove, &rdev->flags) && 8844 rdev->raid_disk < 0) 8845 md_kick_rdev_from_array(rdev); 8846 } 8847 } 8848 8849 if (!mddev->external && !mddev->in_sync) { 8850 spin_lock(&mddev->lock); 8851 set_in_sync(mddev); 8852 spin_unlock(&mddev->lock); 8853 } 8854 8855 if (mddev->sb_flags) 8856 md_update_sb(mddev, 0); 8857 8858 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 8859 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 8860 /* resync/recovery still happening */ 8861 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8862 goto unlock; 8863 } 8864 if (mddev->sync_thread) { 8865 md_reap_sync_thread(mddev); 8866 goto unlock; 8867 } 8868 /* Set RUNNING before clearing NEEDED to avoid 8869 * any transients in the value of "sync_action". 
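 * (readers of the sync_action sysfs file infer the current action from
 * these bits, so there must be no window where both are clear)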
8870 */ 8871 mddev->curr_resync_completed = 0; 8872 spin_lock(&mddev->lock); 8873 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8874 spin_unlock(&mddev->lock); 8875 /* Clear some bits that don't mean anything, but 8876 * might be left set 8877 */ 8878 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 8879 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 8880 8881 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 8882 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 8883 goto not_running; 8884 /* no recovery is running. 8885 * remove any failed drives, then 8886 * add spares if possible. 8887 * Spares are also removed and re-added, to allow 8888 * the personality to fail the re-add. 8889 */ 8890 8891 if (mddev->reshape_position != MaxSector) { 8892 if (mddev->pers->check_reshape == NULL || 8893 mddev->pers->check_reshape(mddev) != 0) 8894 /* Cannot proceed */ 8895 goto not_running; 8896 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8897 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8898 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 8899 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8900 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8901 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8902 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8903 } else if (mddev->recovery_cp < MaxSector) { 8904 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8905 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8906 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 8907 /* nothing to be done ... */ 8908 goto not_running; 8909 8910 if (mddev->pers->sync_request) { 8911 if (spares) { 8912 /* We are adding a device or devices to an array 8913 * which has the bitmap stored on all devices. 8914 * So make sure all bitmap pages get written 8915 */ 8916 md_bitmap_write_all(mddev->bitmap); 8917 } 8918 INIT_WORK(&mddev->del_work, md_start_sync); 8919 queue_work(md_misc_wq, &mddev->del_work); 8920 goto unlock; 8921 } 8922 not_running: 8923 if (!mddev->sync_thread) { 8924 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8925 wake_up(&resync_wait); 8926 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 8927 &mddev->recovery)) 8928 if (mddev->sysfs_action) 8929 sysfs_notify_dirent_safe(mddev->sysfs_action); 8930 } 8931 unlock: 8932 wake_up(&mddev->sb_wait); 8933 mddev_unlock(mddev); 8934 } 8935 } 8936 EXPORT_SYMBOL(md_check_recovery); 8937 8938 void md_reap_sync_thread(struct mddev *mddev) 8939 { 8940 struct md_rdev *rdev; 8941 sector_t old_dev_sectors = mddev->dev_sectors; 8942 bool is_reshaped = false; 8943 8944 /* resync has finished, collect result */ 8945 md_unregister_thread(&mddev->sync_thread); 8946 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8947 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8948 /* success...*/ 8949 /* activate any spares */ 8950 if (mddev->pers->spare_active(mddev)) { 8951 sysfs_notify(&mddev->kobj, NULL, 8952 "degraded"); 8953 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 8954 } 8955 } 8956 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8957 mddev->pers->finish_reshape) { 8958 mddev->pers->finish_reshape(mddev); 8959 if (mddev_is_clustered(mddev)) 8960 is_reshaped = true; 8961 } 8962 8963 /* If array is no longer degraded, then any saved_raid_disk 8964 * information must be scrapped.
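 * (saved_raid_disk is only a hint for slotting a re-added device back
 * into its old role; once the array is fully in-sync it is stale)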
8965 */ 8966 if (!mddev->degraded) 8967 rdev_for_each(rdev, mddev) 8968 rdev->saved_raid_disk = -1; 8969 8970 md_update_sb(mddev, 1); 8971 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can 8972 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by 8973 * clustered raid */ 8974 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) 8975 md_cluster_ops->resync_finish(mddev); 8976 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8977 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 8978 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8979 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8980 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8981 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 8982 /* 8983 * We call md_cluster_ops->update_size here because sync_size could 8984 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared, 8985 * so it is time to update size across the cluster. 8986 */ 8987 if (mddev_is_clustered(mddev) && is_reshaped 8988 && !test_bit(MD_CLOSING, &mddev->flags)) 8989 md_cluster_ops->update_size(mddev, old_dev_sectors); 8990 wake_up(&resync_wait); 8991 /* flag recovery needed just to double check */ 8992 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8993 sysfs_notify_dirent_safe(mddev->sysfs_action); 8994 md_new_event(mddev); 8995 if (mddev->event_work.func) 8996 queue_work(md_misc_wq, &mddev->event_work); 8997 } 8998 EXPORT_SYMBOL(md_reap_sync_thread); 8999 9000 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 9001 { 9002 sysfs_notify_dirent_safe(rdev->sysfs_state); 9003 wait_event_timeout(rdev->blocked_wait, 9004 !test_bit(Blocked, &rdev->flags) && 9005 !test_bit(BlockedBadBlocks, &rdev->flags), 9006 msecs_to_jiffies(5000)); 9007 rdev_dec_pending(rdev, mddev); 9008 } 9009 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 9010 9011 void md_finish_reshape(struct mddev *mddev) 9012 { 9013 /* called by the personality module when reshape completes.
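 *
 * A worked example of the adjustment below: if a reshape moved
 * data_offset from 2048 to a new_data_offset of 4096 sectors, each
 * rdev loses 4096 - 2048 = 2048 usable sectors, so rdev->sectors
 * shrinks by that amount before data_offset is updated.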
*/ 9014 struct md_rdev *rdev; 9015 9016 rdev_for_each(rdev, mddev) { 9017 if (rdev->data_offset > rdev->new_data_offset) 9018 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 9019 else 9020 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 9021 rdev->data_offset = rdev->new_data_offset; 9022 } 9023 } 9024 EXPORT_SYMBOL(md_finish_reshape); 9025 9026 /* Bad block management */ 9027 9028 /* Returns 1 on success, 0 on failure */ 9029 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9030 int is_new) 9031 { 9032 struct mddev *mddev = rdev->mddev; 9033 int rv; 9034 if (is_new) 9035 s += rdev->new_data_offset; 9036 else 9037 s += rdev->data_offset; 9038 rv = badblocks_set(&rdev->badblocks, s, sectors, 0); 9039 if (rv == 0) { 9040 /* Make sure they get written out promptly */ 9041 if (test_bit(ExternalBbl, &rdev->flags)) 9042 sysfs_notify(&rdev->kobj, NULL, 9043 "unacknowledged_bad_blocks"); 9044 sysfs_notify_dirent_safe(rdev->sysfs_state); 9045 set_mask_bits(&mddev->sb_flags, 0, 9046 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); 9047 md_wakeup_thread(rdev->mddev->thread); 9048 return 1; 9049 } else 9050 return 0; 9051 } 9052 EXPORT_SYMBOL_GPL(rdev_set_badblocks); 9053 9054 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9055 int is_new) 9056 { 9057 int rv; 9058 if (is_new) 9059 s += rdev->new_data_offset; 9060 else 9061 s += rdev->data_offset; 9062 rv = badblocks_clear(&rdev->badblocks, s, sectors); 9063 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) 9064 sysfs_notify(&rdev->kobj, NULL, "bad_blocks"); 9065 return rv; 9066 } 9067 EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 9068 9069 static int md_notify_reboot(struct notifier_block *this, 9070 unsigned long code, void *x) 9071 { 9072 struct list_head *tmp; 9073 struct mddev *mddev; 9074 int need_delay = 0; 9075 9076 for_each_mddev(mddev, tmp) { 9077 if (mddev_trylock(mddev)) { 9078 if (mddev->pers) 9079 __md_stop_writes(mddev); 9080 if (mddev->persistent) 9081 mddev->safemode = 2; 9082 mddev_unlock(mddev); 9083 } 9084 need_delay = 1; 9085 } 9086 /* 9087 * certain more exotic SCSI devices are known to be 9088 * volatile wrt too early system reboots. While the 9089 * right place to handle this issue is the given 9090 * driver, we do want to have a safe RAID driver ... 
9091 */ 9092 if (need_delay) 9093 mdelay(1000*1); 9094 9095 return NOTIFY_DONE; 9096 } 9097 9098 static struct notifier_block md_notifier = { 9099 .notifier_call = md_notify_reboot, 9100 .next = NULL, 9101 .priority = INT_MAX, /* before any real devices */ 9102 }; 9103 9104 static void md_geninit(void) 9105 { 9106 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 9107 9108 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); 9109 } 9110 9111 static int __init md_init(void) 9112 { 9113 int ret = -ENOMEM; 9114 9115 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); 9116 if (!md_wq) 9117 goto err_wq; 9118 9119 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 9120 if (!md_misc_wq) 9121 goto err_misc_wq; 9122 9123 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0) 9124 goto err_md; 9125 9126 if ((ret = register_blkdev(0, "mdp")) < 0) 9127 goto err_mdp; 9128 mdp_major = ret; 9129 9130 blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE, 9131 md_probe, NULL, NULL); 9132 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, 9133 md_probe, NULL, NULL); 9134 9135 register_reboot_notifier(&md_notifier); 9136 raid_table_header = register_sysctl_table(raid_root_table); 9137 9138 md_geninit(); 9139 return 0; 9140 9141 err_mdp: 9142 unregister_blkdev(MD_MAJOR, "md"); 9143 err_md: 9144 destroy_workqueue(md_misc_wq); 9145 err_misc_wq: 9146 destroy_workqueue(md_wq); 9147 err_wq: 9148 return ret; 9149 } 9150 9151 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) 9152 { 9153 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 9154 struct md_rdev *rdev2; 9155 int role, ret; 9156 char b[BDEVNAME_SIZE]; 9157 9158 /* 9159 * If the size was changed on another node, we need to 9160 * do the resize here as well. 9161 */ 9162 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { 9163 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); 9164 if (ret) 9165 pr_info("md-cluster: resize failed\n"); 9166 else 9167 md_bitmap_update_sb(mddev->bitmap); 9168 } 9169 9170 /* Check for change of roles in the active devices */ 9171 rdev_for_each(rdev2, mddev) { 9172 if (test_bit(Faulty, &rdev2->flags)) 9173 continue; 9174 9175 /* Check if the roles changed */ 9176 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); 9177 9178 if (test_bit(Candidate, &rdev2->flags)) { 9179 if (role == 0xfffe) { 9180 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b)); 9181 md_kick_rdev_from_array(rdev2); 9182 continue; 9183 } 9184 else 9185 clear_bit(Candidate, &rdev2->flags); 9186 } 9187 9188 if (role != rdev2->raid_disk) { 9189 /* 9190 * the device got activated, unless a reshape is happening. 9191 */ 9192 if (rdev2->raid_disk == -1 && role != 0xffff && 9193 !(le32_to_cpu(sb->feature_map) & 9194 MD_FEATURE_RESHAPE_ACTIVE)) { 9195 rdev2->saved_raid_disk = role; 9196 ret = remove_and_add_spares(mddev, rdev2); 9197 pr_info("Activated spare: %s\n", 9198 bdevname(rdev2->bdev,b)); 9199 /* wake up mddev->thread here, so the array can 9200 * perform a resync with the newly activated disk */ 9201 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9202 md_wakeup_thread(mddev->thread); 9203 9204 } 9205 /* device faulty 9206 * We just want to do the minimum to mark the disk 9207 * as faulty. The recovery is performed by the 9208 * one who initiated the error.
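 * (dev_roles values in the v1.x superblock: 0xffff means spare,
 * 0xfffe means faulty and 0xfffd means journal)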
9209 */ 9210 if ((role == 0xfffe) || (role == 0xfffd)) { 9211 md_error(mddev, rdev2); 9212 clear_bit(Blocked, &rdev2->flags); 9213 } 9214 } 9215 } 9216 9217 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) 9218 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); 9219 9220 /* 9221 * mddev->delta_disks has already been updated in update_raid_disks, 9222 * so it is time to check for a reshape. 9223 */ 9224 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9225 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9226 /* 9227 * a reshape is happening on the remote node, so we need to 9228 * update reshape_position and call start_reshape. 9229 */ 9230 mddev->reshape_position = sb->reshape_position; 9231 if (mddev->pers->update_reshape_pos) 9232 mddev->pers->update_reshape_pos(mddev); 9233 if (mddev->pers->start_reshape) 9234 mddev->pers->start_reshape(mddev); 9235 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9236 mddev->reshape_position != MaxSector && 9237 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9238 /* the reshape has just finished on another node. */ 9239 mddev->reshape_position = MaxSector; 9240 if (mddev->pers->update_reshape_pos) 9241 mddev->pers->update_reshape_pos(mddev); 9242 } 9243 9244 /* Finally set the event to be up to date */ 9245 mddev->events = le64_to_cpu(sb->events); 9246 } 9247 9248 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) 9249 { 9250 int err; 9251 struct page *swapout = rdev->sb_page; 9252 struct mdp_superblock_1 *sb; 9253 9254 /* Store the sb page of the rdev in the swapout temporary 9255 * variable in case the reload below fails 9256 */ 9257 rdev->sb_page = NULL; 9258 err = alloc_disk_sb(rdev); 9259 if (err == 0) { 9260 ClearPageUptodate(rdev->sb_page); 9261 rdev->sb_loaded = 0; 9262 err = super_types[mddev->major_version]. 9263 load_super(rdev, NULL, mddev->minor_version); 9264 } 9265 if (err < 0) { 9266 pr_warn("%s: %d Could not reload rdev(%d) err: %d.
Restoring old values\n", 9267 __func__, __LINE__, rdev->desc_nr, err); 9268 if (rdev->sb_page) 9269 put_page(rdev->sb_page); 9270 rdev->sb_page = swapout; 9271 rdev->sb_loaded = 1; 9272 return err; 9273 } 9274 9275 sb = page_address(rdev->sb_page); 9276 /* Only restore recovery_offset if the superblock advertises 9277 * MD_FEATURE_RECOVERY_OFFSET 9278 */ 9279 9280 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)) 9281 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 9282 9283 /* The other node finished recovery, call spare_active to set 9284 * device In_sync and mddev->degraded 9285 */ 9286 if (rdev->recovery_offset == MaxSector && 9287 !test_bit(In_sync, &rdev->flags) && 9288 mddev->pers->spare_active(mddev)) 9289 sysfs_notify(&mddev->kobj, NULL, "degraded"); 9290 9291 put_page(swapout); 9292 return 0; 9293 } 9294 9295 void md_reload_sb(struct mddev *mddev, int nr) 9296 { 9297 struct md_rdev *rdev; 9298 int err; 9299 9300 /* Find the rdev */ 9301 rdev_for_each_rcu(rdev, mddev) { 9302 if (rdev->desc_nr == nr) 9303 break; 9304 } 9305 9306 if (!rdev || rdev->desc_nr != nr) { 9307 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); 9308 return; 9309 } 9310 9311 err = read_rdev(mddev, rdev); 9312 if (err < 0) 9313 return; 9314 9315 check_sb_changes(mddev, rdev); 9316 9317 /* Read all rdevs to update recovery_offset */ 9318 rdev_for_each_rcu(rdev, mddev) { 9319 if (!test_bit(Faulty, &rdev->flags)) 9320 read_rdev(mddev, rdev); 9321 } 9322 } 9323 EXPORT_SYMBOL(md_reload_sb); 9324 9325 #ifndef MODULE 9326 9327 /* 9328 * Searches all registered partitions for autorun RAID arrays 9329 * at boot time. 9330 */ 9331 9332 static DEFINE_MUTEX(detected_devices_mutex); 9333 static LIST_HEAD(all_detected_devices); 9334 struct detected_devices_node { 9335 struct list_head list; 9336 dev_t dev; 9337 }; 9338 9339 void md_autodetect_dev(dev_t dev) 9340 { 9341 struct detected_devices_node *node_detected_dev; 9342 9343 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 9344 if (node_detected_dev) { 9345 node_detected_dev->dev = dev; 9346 mutex_lock(&detected_devices_mutex); 9347 list_add_tail(&node_detected_dev->list, &all_detected_devices); 9348 mutex_unlock(&detected_devices_mutex); 9349 } 9350 } 9351 9352 static void autostart_arrays(int part) 9353 { 9354 struct md_rdev *rdev; 9355 struct detected_devices_node *node_detected_dev; 9356 dev_t dev; 9357 int i_scanned, i_passed; 9358 9359 i_scanned = 0; 9360 i_passed = 0; 9361 9362 pr_info("md: Autodetecting RAID arrays.\n"); 9363 9364 mutex_lock(&detected_devices_mutex); 9365 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 9366 i_scanned++; 9367 node_detected_dev = list_entry(all_detected_devices.next, 9368 struct detected_devices_node, list); 9369 list_del(&node_detected_dev->list); 9370 dev = node_detected_dev->dev; 9371 kfree(node_detected_dev); 9372 mutex_unlock(&detected_devices_mutex); 9373 rdev = md_import_device(dev,0, 90); 9374 mutex_lock(&detected_devices_mutex); 9375 if (IS_ERR(rdev)) 9376 continue; 9377 9378 if (test_bit(Faulty, &rdev->flags)) 9379 continue; 9380 9381 set_bit(AutoDetected, &rdev->flags); 9382 list_add(&rdev->same_set, &pending_raid_disks); 9383 i_passed++; 9384 } 9385 mutex_unlock(&detected_devices_mutex); 9386 9387 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed); 9388 9389 autorun_devices(part); 9390 } 9391 9392 #endif /* !MODULE */ 9393 9394 static __exit void md_exit(void) 9395 { 9396 struct mddev *mddev; 9397 struct list_head
*tmp; 9398 int delay = 1; 9399 9400 blk_unregister_region(MKDEV(MD_MAJOR,0), 512); 9401 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 9402 9403 unregister_blkdev(MD_MAJOR,"md"); 9404 unregister_blkdev(mdp_major, "mdp"); 9405 unregister_reboot_notifier(&md_notifier); 9406 unregister_sysctl_table(raid_table_header); 9407 9408 /* We cannot unload the modules while some process is 9409 * waiting for us in select() or poll() - wake them up 9410 */ 9411 md_unloading = 1; 9412 while (waitqueue_active(&md_event_waiters)) { 9413 /* not safe to leave yet */ 9414 wake_up(&md_event_waiters); 9415 msleep(delay); 9416 delay += delay; 9417 } 9418 remove_proc_entry("mdstat", NULL); 9419 9420 for_each_mddev(mddev, tmp) { 9421 export_array(mddev); 9422 mddev->ctime = 0; 9423 mddev->hold_active = 0; 9424 /* 9425 * for_each_mddev() will call mddev_put() at the end of each 9426 * iteration. As the mddev is now fully clear, this will 9427 * schedule the mddev for destruction by a workqueue, and the 9428 * destroy_workqueue() below will wait for that to complete. 9429 */ 9430 } 9431 destroy_workqueue(md_misc_wq); 9432 destroy_workqueue(md_wq); 9433 } 9434 9435 subsys_initcall(md_init); 9436 module_exit(md_exit) 9437 9438 static int get_ro(char *buffer, const struct kernel_param *kp) 9439 { 9440 return sprintf(buffer, "%d", start_readonly); 9441 } 9442 static int set_ro(const char *val, const struct kernel_param *kp) 9443 { 9444 return kstrtouint(val, 10, (unsigned int *)&start_readonly); 9445 } 9446 9447 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 9448 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 9449 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 9450 module_param(create_on_open, bool, S_IRUSR|S_IWUSR); 9451 9452 MODULE_LICENSE("GPL"); 9453 MODULE_DESCRIPTION("MD RAID framework"); 9454 MODULE_ALIAS("md"); 9455 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 9456
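/*
 * Module parameter usage sketch (illustrative values; paths assume the
 * driver is built as md_mod):
 *
 *	# create a named array without the racy open-based probing
 *	echo md_test > /sys/module/md_mod/parameters/new_array
 *
 *	# assemble arrays auto-read-only until the first write arrives
 *	modprobe md_mod start_ro=1
 */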