/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"

#define MaxSector (~(sector_t)0)

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define MD_FAILFAST     (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
/*
 * MD's 'extended' device
 */
struct md_rdev {
        struct list_head same_set;      /* RAID devices within the same set */

        sector_t sectors;               /* Device size (in 512-byte sectors) */
        struct mddev *mddev;            /* RAID array if running */
        int last_events;                /* IO event timestamp */

        /*
         * If meta_bdev is non-NULL, it means that a separate device is
         * being used to store the metadata (superblock/bitmap) which
         * would otherwise be contained on the same device as the data (bdev).
         */
        struct block_device *meta_bdev;
        struct block_device *bdev;      /* block device handle */

        struct page *sb_page, *bb_page;
        int sb_loaded;
        __u64 sb_events;
        sector_t data_offset;           /* start of data in array */
        sector_t new_data_offset;       /* only relevant while reshaping */
        sector_t sb_start;              /* offset of the super block (in 512-byte sectors) */
        int sb_size;                    /* bytes in the superblock */
        int preferred_minor;            /* autorun support */

        struct kobject kobj;

        /* A device can be in one of three states based on two flags:
         * Not working:    faulty==1 in_sync==0
         * Fully working:  faulty==0 in_sync==1
         * Working, but not
         * in sync with array
         *                 faulty==0 in_sync==0
         *
         * It can never have faulty==1, in_sync==1
         * This reduces the burden of testing multiple flags in many cases
         */

        unsigned long flags;            /* bit set of 'enum flag_bits' bits. */
        wait_queue_head_t blocked_wait;

        int desc_nr;                    /* descriptor index in the superblock */
        int raid_disk;                  /* role of device in array */
        int new_raid_disk;              /* role that the device will have in
                                         * the array after a level-change completes.
                                         */
        int saved_raid_disk;            /* role that device used to have in the
                                         * array and could again if we did a partial
                                         * resync from the bitmap
                                         */
        union {
                sector_t recovery_offset; /* If this device has been partially
                                           * recovered, this is where we were
                                           * up to.
                                           */
                sector_t journal_tail;    /* If this device is a journal device,
                                           * this is the journal tail (journal
                                           * recovery start point)
                                           */
        };

        atomic_t nr_pending;            /* number of pending requests.
                                         * only maintained for arrays that
                                         * support hot removal
                                         */
        atomic_t read_errors;           /* number of consecutive read errors that
                                         * we have tried to ignore.
                                         */
        time64_t last_read_error;       /* monotonic time since our
                                         * last read error
                                         */
        atomic_t corrected_errors;      /* number of corrected read errors,
                                         * for reporting to userspace and storing
                                         * in superblock.
                                         */
        struct work_struct del_work;    /* used for delayed sysfs removal */

        struct kernfs_node *sysfs_state; /* handle for 'state'
                                          * sysfs entry */

        struct badblocks badblocks;

        struct {
                short offset;           /* Offset from superblock to start of PPL.
                                         * Not used by external metadata. */
                unsigned int size;      /* Size in sectors of the PPL space */
                sector_t sector;        /* First sector of the PPL space */
        } ppl;
};
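
/*
 * Illustrative sketch, not part of the original header: the two-flag
 * device state described above is normally probed with the atomic
 * bitops on ->flags.  For example, a personality picking a device to
 * read from might (schematically) do:
 *
 *	if (rdev && !test_bit(Faulty, &rdev->flags) &&
 *	    test_bit(In_sync, &rdev->flags))
 *		atomic_inc(&rdev->nr_pending);
 */
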
enum flag_bits {
        Faulty,                 /* device is known to have a fault */
        In_sync,                /* device is in_sync with rest of array */
        Bitmap_sync,            /* ..actually, not quite In_sync.  Need a
                                 * bitmap-based recovery to get fully in sync.
                                 * The bit is only meaningful before device
                                 * has been passed to pers->hot_add_disk.
                                 */
        WriteMostly,            /* Avoid reading if at all possible */
        AutoDetected,           /* added by auto-detect */
        Blocked,                /* An error occurred but has not yet
                                 * been acknowledged by the metadata
                                 * handler, so don't allow writes
                                 * until it is cleared */
        WriteErrorSeen,         /* A write error has been seen on this
                                 * device
                                 */
        FaultRecorded,          /* Intermediate state for clearing
                                 * Blocked.  The Fault is/will-be
                                 * recorded in the metadata, but that
                                 * metadata hasn't been stored safely
                                 * on disk yet.
                                 */
        BlockedBadBlocks,       /* A writer is blocked because they
                                 * found an unacknowledged bad-block.
                                 * This can safely be cleared at any
                                 * time, and the writer will re-check.
                                 * It may be set at any time, and at
                                 * worst the writer will timeout and
                                 * re-check.  So setting it as
                                 * accurately as possible is good, but
                                 * not absolutely critical.
                                 */
        WantReplacement,        /* This device is a candidate to be
                                 * hot-replaced, either because it has
                                 * reported some faults, or because
                                 * of explicit request.
                                 */
        Replacement,            /* This device is a replacement for
                                 * a want_replacement device with same
                                 * raid_disk number.
                                 */
        Candidate,              /* For clustered environments only:
                                 * This device is seen locally but not
                                 * by the whole cluster
                                 */
        Journal,                /* This device is used as journal for
                                 * raid-5/6.
                                 * Usually, this device should be faster
                                 * than other devices in the array
                                 */
        ClusterRemove,
        RemoveSynchronized,     /* synchronize_rcu() was called after
                                 * this device was known to be faulty,
                                 * so it is safe to remove without
                                 * another synchronize_rcu() call.
                                 */
        ExternalBbl,            /* External metadata provides bad
                                 * block management for a disk
                                 */
        FailFast,               /* Minimal retries should be attempted on
                                 * this device, so use REQ_FAILFAST_DEV.
                                 * Also don't try to repair failed reads.
                                 * It is expected that no bad block log
                                 * is present.
                                 */
        LastDev,                /* Seems to be the last working dev as
                                 * it didn't fail, so don't use FailFast
                                 * any more for metadata
                                 */
};

static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
                              sector_t *first_bad, int *bad_sectors)
{
        if (unlikely(rdev->badblocks.count)) {
                int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
                                         sectors,
                                         first_bad, bad_sectors);
                if (rv)
                        *first_bad -= rdev->data_offset;
                return rv;
        }
        return 0;
}
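
/*
 * Illustrative sketch, not part of the original header: callers pass an
 * array-relative sector and get array-relative results back, e.g.
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, this_sector, sectors,
 *			&first_bad, &bad_sectors))
 *		... skip this device, or clip the request at first_bad ...
 */
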
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                              int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                int is_new);
struct md_cluster_info;

/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
        MD_ARRAY_FIRST_USE,     /* First use of array, needs initialization */
        MD_CLOSING,             /* If set, we are closing the array, so do
                                 * not open it */
        MD_JOURNAL_CLEAN,       /* A raid with journal is already clean */
        MD_HAS_JOURNAL,         /* The raid array has journal feature set */
        MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
                                   * already took resync lock, need to
                                   * release the lock */
        MD_FAILFAST_SUPPORTED,  /* Using MD_FAILFAST on metadata writes is
                                 * supported as calls to md_error() will
                                 * never cause the array to become failed.
                                 */
        MD_HAS_PPL,             /* The raid array has PPL feature set */
        MD_HAS_MULTIPLE_PPLS,   /* The raid array has multiple PPLs feature set */
};

enum mddev_sb_flags {
        MD_SB_CHANGE_DEVS,      /* Some device status has changed */
        MD_SB_CHANGE_CLEAN,     /* transition to or from 'clean' */
        MD_SB_CHANGE_PENDING,   /* switch from 'clean' to 'active' in progress */
        MD_SB_NEED_REWRITE,     /* metadata write needs to be repeated */
};
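
/*
 * Illustrative sketch, not part of the original header: code that has
 * changed device state typically marks the superblock dirty and pokes
 * the management thread, which performs the actual metadata write:
 *
 *	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 *	md_wakeup_thread(mddev->thread);
 */
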
struct mddev {
        void *private;
        struct md_personality *pers;
        dev_t unit;
        int md_minor;
        struct list_head disks;
        unsigned long flags;
        unsigned long sb_flags;

        int suspended;
        atomic_t active_io;
        int ro;
        int sysfs_active;               /* set when sysfs deletes
                                         * are happening, so run/
                                         * takeover/stop are not safe
                                         */
        struct gendisk *gendisk;

        struct kobject kobj;
        int hold_active;
#define UNTIL_IOCTL     1
#define UNTIL_STOP      2

        /* Superblock information */
        int major_version,
            minor_version,
            patch_version;
        int persistent;
        int external;                   /* metadata is
                                         * managed externally */
        char metadata_type[17];         /* externally set */
        int chunk_sectors;
        time64_t ctime, utime;
        int level, layout;
        char clevel[16];
        int raid_disks;
        int max_disks;
        sector_t dev_sectors;           /* used size of
                                         * component devices */
        sector_t array_sectors;         /* exported array size */
        int external_size;              /* size managed
                                         * externally */
        __u64 events;
        /* If the last 'event' was simply a clean->dirty transition, and
         * we didn't write it to the spares, then it is safe and simple
         * to just decrement the event count on a dirty->clean transition.
         * So we record that possibility here.
         */
        int can_decrease_events;

        char uuid[16];

        /* If the array is being reshaped, we need to record the
         * new shape and an indication of where we are up to.
         * This is written to the superblock.
         * If reshape_position is MaxSector, then no reshape is happening (yet).
         */
        sector_t reshape_position;
        int delta_disks, new_level, new_layout;
        int new_chunk_sectors;
        int reshape_backwards;

        struct md_thread *thread;       /* management thread */
        struct md_thread *sync_thread;  /* doing resync or reconstruct */

        /* 'last_sync_action' is initialized to "none".  It is set when a
         * sync operation (i.e "data-check", "requested-resync", "resync",
         * "recovery", or "reshape") is started.  It holds this value even
         * when the sync thread is "frozen" (interrupted) or "idle" (stopped
         * or finished).  It is overwritten when a new sync operation is begun.
         */
        char *last_sync_action;
        sector_t curr_resync;           /* last block scheduled */
        /* As resync requests can complete out of order, we cannot easily track
         * how much resync has been completed.  So we occasionally pause until
         * everything completes, then set curr_resync_completed to curr_resync.
         * As such it may be well behind the real resync mark, but it is a value
         * we are certain of.
         */
        sector_t curr_resync_completed;
        unsigned long resync_mark;      /* a recent timestamp */
        sector_t resync_mark_cnt;       /* blocks written at resync_mark */
        sector_t curr_mark_cnt;         /* blocks scheduled now */

        sector_t resync_max_sectors;    /* may be set by personality */

        atomic64_t resync_mismatches;   /* count of sectors where
                                         * parity/replica mismatch found
                                         */

        /* allow user-space to request suspension of IO to regions of the array */
        sector_t suspend_lo;
        sector_t suspend_hi;
        /* if zero, use the system-wide default */
        int sync_speed_min;
        int sync_speed_max;

        /* resync even though the same disks are shared among md-devices */
        int parallel_resync;

        int ok_start_degraded;

        unsigned long recovery;
        /* If a RAID personality determines that recovery (of a particular
         * device) will fail due to a read error on the source device, it
         * takes a copy of this number and does not attempt recovery again
         * until this number changes.
         */
        int recovery_disabled;

        int in_sync;                    /* known to not need resync */
        /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
         * that we are never stopping an array while it is open.
         * 'reconfig_mutex' protects all other reconfiguration.
         * These locks are separate due to conflicting interactions
         * with bdev->bd_mutex.
         * Lock ordering is:
         *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
         *  bd_mutex -> open_mutex :     e.g. __blkdev_get -> md_open
         */
        struct mutex open_mutex;
        struct mutex reconfig_mutex;
        atomic_t active;                /* general refcount */
        atomic_t openers;               /* number of active opens */

        int changed;                    /* True if we might need to
                                         * reread partition info */
        int degraded;                   /* whether md should consider
                                         * adding a spare
                                         */

        atomic_t recovery_active;       /* blocks scheduled, but not written */
        wait_queue_head_t recovery_wait;
        sector_t recovery_cp;
        sector_t resync_min;            /* user requested sync
                                         * starts here */
        sector_t resync_max;            /* resync should pause
                                         * when it gets here */

        struct kernfs_node *sysfs_state; /* handle for 'array_state'
                                          * file in sysfs.
                                          */
        struct kernfs_node *sysfs_action; /* handle for 'sync_action' */

        struct work_struct del_work;    /* used for delayed sysfs removal */

        /* "lock" protects:
         *   flush_bio transition from NULL to !NULL
         *   rdev superblocks, events
         *   clearing MD_CHANGE_*
         *   in_sync - and related safemode and MD_CHANGE changes
         *   pers (also protected by reconfig_mutex and pending IO).
         *   clearing ->bitmap
         *   clearing ->bitmap_info.file
         *   changing ->resync_{min,max}
         *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
         */
        spinlock_t lock;
        wait_queue_head_t sb_wait;      /* for waiting on superblock updates */
        atomic_t pending_writes;        /* number of active superblock writes */

        unsigned int safemode;          /* if set, update "clean" superblock
                                         * when no writes pending.
                                         */
        unsigned int safemode_delay;
        struct timer_list safemode_timer;
        struct percpu_ref writes_pending;
        int sync_checkers;              /* # of threads checking writes_pending */
        struct request_queue *queue;    /* for plugging ... */

        struct bitmap *bitmap;          /* the bitmap for the device */
        struct {
                struct file *file;      /* the bitmap file */
                loff_t offset;          /* offset from superblock of
                                         * start of bitmap. May be
                                         * negative, but not '0'
                                         * For external metadata, offset
                                         * from start of device.
                                         */
                unsigned long space;    /* space available at this offset */
                loff_t default_offset;  /* this is the offset to use when
                                         * hot-adding a bitmap.  It should
                                         * eventually be settable by sysfs.
                                         */
                unsigned long default_space; /* space available at
                                              * default offset */
                struct mutex mutex;
                unsigned long chunksize;
                unsigned long daemon_sleep; /* how many jiffies between updates? */
                unsigned long max_write_behind; /* write-behind mode */
                int external;
                int nodes;              /* Maximum number of nodes in the cluster */
                char cluster_name[64];  /* Name of the cluster */
        } bitmap_info;

        atomic_t max_corr_read_errors;  /* max read retries */
        struct list_head all_mddevs;

        struct attribute_group *to_remove;

        struct bio_set *bio_set;
        struct bio_set *sync_set;       /* for sync operations like
                                         * metadata and bitmap writes
                                         */

        /* Generic flush handling.
         * The last to finish preflush schedules a worker to submit
         * the rest of the request (without the REQ_PREFLUSH flag).
         */
        struct bio *flush_bio;
        atomic_t flush_pending;
        struct work_struct flush_work;
        struct work_struct event_work;  /* used by dm to report failure event */
        void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
        struct md_cluster_info *cluster_info;
        unsigned int good_device_nr;    /* good device num within cluster raid */
};
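
/*
 * Illustrative sketch, not part of the original header (assumed usage):
 * a personality's make_request path brackets array writes with the
 * writes_pending accounting above, via md_write_start()/md_write_end()
 * declared later in this file:
 *
 *	if (!md_write_start(mddev, bio))
 *		return false;	// array is suspending; bio is retried later
 *	... issue the write ...
 *	md_write_end(mddev);
 */
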
enum recovery_flags {
        /*
         * If neither SYNC nor RESHAPE is set, then it is a recovery.
         */
        MD_RECOVERY_RUNNING,    /* a thread is running, or about to be started */
        MD_RECOVERY_SYNC,       /* actually doing a resync, not a recovery */
        MD_RECOVERY_RECOVER,    /* doing recovery, or need to try it. */
        MD_RECOVERY_INTR,       /* resync needs to be aborted for some reason */
        MD_RECOVERY_DONE,       /* thread is done and is waiting to be reaped */
        MD_RECOVERY_NEEDED,     /* we might need to start a resync/recover */
        MD_RECOVERY_REQUESTED,  /* user-space has requested a sync (used with SYNC) */
        MD_RECOVERY_CHECK,      /* user-space request for check-only, no repair */
        MD_RECOVERY_RESHAPE,    /* A reshape is happening */
        MD_RECOVERY_FROZEN,     /* User request to abort, and not restart, any action */
        MD_RECOVERY_ERROR,      /* sync-action interrupted because of an IO error */
};

static inline int __must_check mddev_lock(struct mddev *mddev)
{
        return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
        mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
        return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
        return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
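
/*
 * Illustrative sketch, not part of the original header: mddev_lock() is
 * interruptible, so callers (e.g. sysfs store handlers) must check the
 * return value before touching the configuration:
 *
 *	int err = mddev_lock(mddev);
 *	if (err)
 *		return err;
 *	... reconfigure the array ...
 *	mddev_unlock(mddev);
 */
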
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
        atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
        atomic_add(nr_sectors, &bio->bi_disk->sync_io);
}

struct md_personality
{
        char *name;
        int level;
        struct list_head list;
        struct module *owner;
        bool (*make_request)(struct mddev *mddev, struct bio *bio);
        int (*run)(struct mddev *mddev);
        void (*free)(struct mddev *mddev, void *priv);
        void (*status)(struct seq_file *seq, struct mddev *mddev);
        /* error_handler must set ->faulty and clear ->in_sync
         * if appropriate, and should abort recovery if needed
         */
        void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
        int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
        int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
        int (*spare_active) (struct mddev *mddev);
        sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
        int (*resize) (struct mddev *mddev, sector_t sectors);
        sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
        int (*check_reshape) (struct mddev *mddev);
        int (*start_reshape) (struct mddev *mddev);
        void (*finish_reshape) (struct mddev *mddev);
        /* quiesce moves between quiescence states
         * 0 - fully active
         * 1 - no new requests allowed
         * others - reserved
         */
        void (*quiesce) (struct mddev *mddev, int state);
        /* takeover is used to transition an array from one
         * personality to another.  The new personality must be able
         * to handle the data in the current layout.
         * e.g. 2drive raid1 -> 2drive raid5
         *      ndrive raid5 -> degraded n+1drive raid6 with special layout
         * If the takeover succeeds, a new 'private' structure is returned.
         * This needs to be installed and then ->run used to activate the
         * array.
         */
        void *(*takeover) (struct mddev *mddev);
        /* congested implements bdi.congested_fn().
         * Will not be called while array is 'suspended' */
        int (*congested)(struct mddev *mddev, int bits);
        /* Changes the consistency policy of an active array. */
        int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
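
/*
 * Illustrative sketch, not part of the original header: a personality
 * module provides a static instance of this structure and registers it
 * at module init time (see register_md_personality() below), along the
 * lines of:
 *
 *	static struct md_personality raid0_personality = {
 *		.name		= "raid0",
 *		.level		= 0,
 *		.owner		= THIS_MODULE,
 *		.make_request	= raid0_make_request,
 *		.run		= raid0_run,
 *		.free		= raid0_free,
 *		...
 *	};
 *	register_md_personality(&raid0_personality);
 */
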
struct md_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct mddev *, char *);
        ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
        if (sd)
                return sysfs_get_dirent(sd, name);
        return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
        if (sd)
                sysfs_notify_dirent(sd);
}

static inline char *mdname(struct mddev *mddev)
{
        return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
        char nm[20];

        if (!test_bit(Replacement, &rdev->flags) &&
            !test_bit(Journal, &rdev->flags) &&
            mddev->kobj.sd) {
                sprintf(nm, "rd%d", rdev->raid_disk);
                return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
        } else
                return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
        char nm[20];

        if (!test_bit(Replacement, &rdev->flags) &&
            !test_bit(Journal, &rdev->flags) &&
            mddev->kobj.sd) {
                sprintf(nm, "rd%d", rdev->raid_disk);
                sysfs_remove_link(&mddev->kobj, nm);
        }
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)                             \
        list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)                                      \
        list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)                            \
        list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)                                  \
        list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
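
/*
 * Illustrative sketch, not part of the original header: counting the
 * in-sync members of an array with the iterator above:
 *
 *	struct md_rdev *rdev;
 *	int cnt = 0;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			cnt++;
 */
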
struct md_thread {
        void (*run) (struct md_thread *thread);
        struct mddev *mddev;
        wait_queue_head_t wqueue;
        unsigned long flags;
        struct task_struct *tsk;
        unsigned long timeout;
        void *private;
};

#define THREAD_WAKEUP  0

static inline void safe_put_page(struct page *p)
{
        if (p) put_page(p);
}

extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
                struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
        void (*run)(struct md_thread *thread),
        struct mddev *mddev,
        const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                           sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
                        struct page *page, int op, int op_flags,
                        bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
        int faulty = test_bit(Faulty, &rdev->flags);

        if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
}

extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
        return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
        unsigned long unsupported_flags)
{
        mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
        if (bio_op(bio) == REQ_OP_WRITE_SAME &&
            !bio->bi_disk->queue->limits.max_write_same_sectors)
                mddev->queue->limits.max_write_same_sectors = 0;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
        if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
            !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                mddev->queue->limits.max_write_zeroes_sectors = 0;
}
#endif /* _MD_MD_H */