/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"

#define MaxSector (~(sector_t)0)

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
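
/*
 * Illustrative sketch (not part of the original header): personalities
 * that honour the FailFast policy typically OR these flags into a
 * bio's op flags before submitting it, along the lines of:
 *
 *	if (test_bit(FailFast, &rdev->flags) &&
 *	    !test_bit(LastDev, &rdev->flags))
 *		bio->bi_opf |= MD_FAILFAST;
 */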

/*
 * The struct embedded in rdev is used to serialize IO.
 */
struct serial_in_rdev {
	struct rb_root_cached serial_rb;
	spinlock_t serial_lock;
	wait_queue_head_t serial_io_wait;
};

/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	new_data_offset;/* only relevant while reshaping */
	sector_t	sb_start;	/* offset of the super block (in 512-byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:   faulty==1 in_sync==0
	 * Fully working: faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;	/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};

	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	time64_t	last_read_error;	/* monotonic time of our
						 * last read error
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */

	struct serial_in_rdev *serial;  /* used for raid1 io serialization */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state; /* handle for 'state'
					   * sysfs entry */

	struct badblocks badblocks;

	struct {
		short offset;	/* Offset from superblock to start of PPL.
				 * Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync.
				 * The bit is only meaningful before device
				 * has been passed to pers->hot_add_disk.
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	RemoveSynchronized,	/* synchronize_rcu() was called after
				 * this device was known to be faulty,
				 * so it is safe to remove without
				 * another synchronize_rcu() call.
				 */
	ExternalBbl,            /* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
	CollisionCheck,		/*
				 * Check whether there is a collision between
				 * raid1 serial bios.
				 */
};
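
/*
 * Usage sketch (illustrative): these values are bit numbers into
 * rdev->flags, so they are manipulated with the atomic bitops rather
 * than read directly, e.g.:
 *
 *	set_bit(WriteErrorSeen, &rdev->flags);
 *	clear_bit(Blocked, &rdev->flags);
 *	if (test_bit(Faulty, &rdev->flags))
 *		(treat the device as failed)
 */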

static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
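
/*
 * Caller sketch (illustrative, based on how the personalities use this
 * helper): before issuing a read to 'rdev', check whether the target
 * range overlaps a recorded bad block:
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, this_sector, sectors,
 *			&first_bad, &bad_sectors))
 *		(avoid the device, or trim the IO to end before first_bad)
 */
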
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
struct md_cluster_info;

/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
	MD_CLOSING,		/* If set, we are closing the array; do not
				 * open it */
	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only: this node has
				   * already taken the resync lock and
				   * needs to release it */
	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
				 * supported as calls to md_error() will
				 * never cause the array to become failed.
				 */
	MD_HAS_PPL,		/* The raid array has PPL feature set */
	MD_HAS_MULTIPLE_PPLS,	/* The raid array has multiple PPLs feature set */
	MD_ALLOW_SB_UPDATE,	/* md_check_recovery is allowed to update
				 * the metadata without taking reconfig_mutex.
				 */
	MD_UPDATING_SB,		/* md_check_recovery is updating the metadata
				 * without explicitly holding reconfig_mutex.
				 */
	MD_NOT_READY,		/* do_md_run() is active, so 'array_state'
				 * must not report that array is ready yet
				 */
	MD_BROKEN,              /* This is used in RAID-0/LINEAR only, to stop
				 * I/O in case an array member is gone/failed.
				 */
};

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};
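
/*
 * Hedged sketch of the usual "superblock needs writing" sequence as a
 * personality would issue it (illustrative, not part of this header):
 *
 *	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 *	set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 *	md_wakeup_thread(mddev->thread);
 */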

#define NR_SERIAL_INFOS		8
/* record current range of serialize IOs */
struct serial_info {
	struct rb_node node;
	sector_t start;		/* start sector of rb node */
	sector_t last;		/* end sector of rb node */
	sector_t _subtree_last; /* highest sector in subtree of rb node */
};

struct mddev {
	void				*private;
	struct md_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
	unsigned long			sb_flags;

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	int				chunk_sectors;
	time64_t			ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors; /* exported array size */
	int				external_size; /* size managed
							* externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;

	struct md_thread		*thread;	/* management thread */
	struct md_thread		*sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none".  It is set when a
	 * sync operation (i.e "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started.  It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished).  It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt; /* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* known to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;  /* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	struct percpu_ref		writes_pending;
	int				sync_checkers;	/* # of threads checking writes_pending */
	struct request_queue		*queue;	/* for plugging ... */

	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'.
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space; /* space available at this offset */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space; /* space available at
							* default offset */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			nodes; /* Maximum number of nodes in the cluster */
		char                    cluster_name[64]; /* Name of the cluster */
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			bio_set;
	struct bio_set			sync_set; /* for sync operations like
						   * metadata and bitmap writes
						   */

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_PREFLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	ktime_t start_flush, last_flush; /* last_flush is when the last completed
					  * flush was started.
					  */
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	mempool_t *serial_info_pool;
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
	unsigned int			good_device_nr;	/* good device num within cluster raid */

	bool	has_superblocks:1;
	bool	fail_last_dev:1;
	bool	serialize_policy:1;
};

enum recovery_flags {
	/*
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
	MD_RECOVERY_ERROR,	/* sync-action interrupted because of an I/O error */
	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
};
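
/*
 * A minimal sketch of how a resync/recovery is requested (the same
 * sequence appears in rdev_dec_pending() below):
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 */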

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
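
/*
 * Locking sketch (illustrative): mddev_lock() can fail if the waiting
 * task is interrupted, so its result must be checked;
 * mddev_lock_nointr() is for paths where failure is not acceptable:
 *
 *	int err = mddev_lock(mddev);
 *	if (err)
 *		return err;
 *	(reconfigure the array)
 *	mddev_unlock(mddev);
 */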

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
}

struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start up work that does NOT require an md_thread. Tasks that
	 * require an md_thread should go into start().
	 */
	int (*run)(struct mddev *mddev);
	/* start up work that requires md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	void (*update_reshape_pos) (struct mddev *mddev);
	/* quiesce suspends or resumes internal processing.
	 * 1 - stop new actions and wait for action io to complete
	 * 0 - return to normal behaviour
	 */
	void (*quiesce) (struct mddev *mddev, int quiesce);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
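
/*
 * Minimal registration sketch (illustrative; all 'example_*' names and
 * the level value are hypothetical): a personality fills in its hooks
 * and registers at module init time via register_md_personality(),
 * declared further below:
 *
 *	static struct md_personality example_personality = {
 *		.name		= "example",
 *		.level		= -1000,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.free		= example_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 */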

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}
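
/*
 * Usage sketch (illustrative): md wakes userspace pollers of a sysfs
 * attribute via the cached dirent handle, e.g. after a device state
 * change:
 *
 *	sysfs_notify_dirent_safe(rdev->sysfs_state);
 *	sysfs_notify_dirent_safe(mddev->sysfs_state);
 */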

static inline char *mdname(struct mddev *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)				\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)				\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
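
/*
 * Iteration sketch (illustrative): walking the member devices of an
 * array, under RCU when the caller cannot take the reconfig lock:
 *
 *	struct md_rdev *rdev;
 *
 *	rcu_read_lock();
 *	rdev_for_each_rcu(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			(inspect or count the device)
 *	rcu_read_unlock();
 */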

struct md_thread {
	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

#define THREAD_WAKEUP  0

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
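
/*
 * Thread lifecycle sketch (illustrative; 'exampled' is a hypothetical
 * thread function): register a thread, poke it when there is work, and
 * tear it down through the threadp interface:
 *
 *	mddev->thread = md_register_thread(exampled, mddev, "example");
 *	if (!mddev->thread)
 *		return -ENOMEM;
 *	...
 *	md_wakeup_thread(mddev->thread);
 *	...
 *	md_unregister_thread(&mddev->thread);
 */
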
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern int mddev_congested(struct mddev *mddev, int bits);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int op, int op_flags,
			bool metadata_op);
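
/*
 * Metadata read sketch (illustrative): sync_page_io() performs a
 * synchronous transfer against 'rdev'; with metadata_op true the
 * sector is taken relative to the metadata location rather than
 * data_offset, e.g. reading the superblock:
 *
 *	if (!sync_page_io(rdev, 0, rdev->sb_size, rdev->sb_page,
 *			  REQ_OP_READ, 0, true))
 *		return -EIO;
 */
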
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev *rdev);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
				     bool is_suspend);
extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
				      bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);

static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
{
	int flags = rdev->bdev->bd_disk->flags;

	if (!(flags & GENHD_FL_UP)) {
		if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
			pr_warn("md: %s: %s array has a missing/failed member\n",
				mdname(rdev->mddev), md_type);
		return true;
	}
	return false;
}

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}

extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
	    !bio->bi_disk->queue->limits.max_write_same_sectors)
		mddev->queue->limits.max_write_same_sectors = 0;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
		mddev->queue->limits.max_write_zeroes_sectors = 0;
}
#endif /* _MD_MD_H */