#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>

/*
 *
 * Each stripe contains one buffer per device.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
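 *
 * Illustrative sketch (hypothetical helper, not part of this header):
 * given the two flag bits, the state of a buffer can be decoded as:
 *
 *	static inline const char *r5dev_state_name(struct r5dev *dev)
 *	{
 *		int uptodate = test_bit(R5_UPTODATE, &dev->flags);
 *		int locked = test_bit(R5_LOCKED, &dev->flags);
 *
 *		if (uptodate)
 *			return locked ? "Dirty" : "Clean";
 *		return locked ? "Want" : "Empty";
 *	}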
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) for read requests, one (bh_write) for write.
 * There should never be more than one buffer on the two lists
 * together, but that is not guaranteed, so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos.  The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time.  It can be claimed from interrupts.
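 *
 * Illustrative sketch (hypothetical, simplified): in the current code the
 * per-device request lists live in struct r5dev (dev->toread and
 * dev->towrite, defined below), and attaching a bio under device_lock
 * looks roughly like this (the real code also keeps each list sorted by
 * bi_sector; see r5_next_bio() below):
 *
 *	spin_lock_irq(&conf->device_lock);
 *	if (forwrite) {
 *		bio->bi_next = sh->dev[dd_idx].towrite;
 *		sh->dev[dd_idx].towrite = bio;
 *	} else {
 *		bio->bi_next = sh->dev[dd_idx].toread;
 *		sh->dev[dd_idx].toread = bio;
 *	}
 *	spin_unlock_irq(&conf->device_lock);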
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are fifo queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
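 *
 * Illustrative sketch (hypothetical, simplified): the release path
 * described above maintains the "count==0 implies on a list" rule
 * roughly like this, where 'hash' is the stripe's hash_lock_index:
 *
 *	spin_lock_irqsave(&conf->device_lock, flags);
 *	if (atomic_dec_and_test(&sh->count)) {
 *		if (test_bit(STRIPE_HANDLE, &sh->state))
 *			list_add_tail(&sh->lru, &conf->handle_list);
 *		else
 *			list_add_tail(&sh->lru, &conf->inactive_list[hash]);
 *	}
 *	spin_unlock_irqrestore(&conf->device_lock, flags);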
 *
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity dependent operations like writes and compute_blocks
 *    from starting while a check is in progress.  Some dma engines can perform
 *    the check without damaging the parity block, in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date.  This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 *    that block as if it is up to date.  raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */
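
/*
 * Illustrative sketch of dependency 2/ above (hypothetical, simplified):
 * when a write is scheduled, each affected block is locked and marked
 * not-uptodate before the drain operation is requested, which holds off
 * reads, parity checks and computes on those blocks:
 *
 *	set_bit(R5_LOCKED, &dev->flags);
 *	clear_bit(R5_UPTODATE, &dev->flags);
 *	set_bit(R5_Wantdrain, &dev->flags);
 *	set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
 */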

/*
 * Operations state - intermediate states that are visible outside of
 *   STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  For simple operations like biofill and
 * compute that only have an _idle and _run state they are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - xor parity check operation is running
 * @check_state_run_q - q-parity check operation is running
 * @check_state_run_pq - pq dual parity check operation is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run, /* xor parity check */
	check_state_run_q, /* q-parity check */
	check_state_run_pq, /* pq dual parity check */
	check_state_check_result,
	check_state_compute_run, /* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 * @reconstruct_state_idle - reconstruct operations are quiesced
 * @reconstruct_state_prexor_drain_run - prexor-write in progress
 * @reconstruct_state_drain_run - write in progress
 * @reconstruct_state_run - expand in progress
 * @reconstruct_state_prexor_drain_result - prexor-write result is valid
 * @reconstruct_state_drain_result - write result is valid
 * @reconstruct_state_result - expand result is valid
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;	      /* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;/* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;	      /* nr of active thread/requests */
	int			bm_seq;	/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states reconstruct_state;
	spinlock_t		stripe_lock;
	int			cpu;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head; /* protected by stripe lock */
	spinlock_t		batch_lock; /* only header's lock is useful */
	struct list_head	batch_list; /* protected by head's batch lock*/

	struct r5l_io_unit	*log_io;
	struct list_head	log_list;
	sector_t		log_start; /* first meta block on the journal */
	struct list_head	r5c; /* for r5c_cache->stripe_in_journal */
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;			/* sector of this page */
		unsigned long	flags;
		u32		log_checksum;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 *     for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int injournal, just_cached;
	int failed_num[2];
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct bio_list return_bi;
	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int log_failed;
	int waiting_extra_page;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge, /* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the read error */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
	R5_InJournal,	/* data being written is in the journal device.
			 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
};
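
/*
 * Illustrative sketch: these flags are manipulated with the kernel's
 * atomic bitops on dev->flags.  A hypothetical fragment scheduling a
 * read for an Empty buffer (cf. the Empty -> Want transition above):
 *
 *	struct r5dev *dev = &sh->dev[i];
 *
 *	if (!test_bit(R5_UPTODATE, &dev->flags) &&
 *	    !test_bit(R5_LOCKED, &dev->flags)) {
 *		set_bit(R5_LOCKED, &dev->flags);
 *		set_bit(R5_Wantread, &dev->flags);
 *	}
 */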

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_DEGRADED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_OPS_REQ_PENDING,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
				 * this bit is used in two scenarios:
				 *
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
				 *  clear in second entry of r5l_write_stripe
				 *  used to bypass logic in handle_stripe
				 *
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
				 *  clear when journal write is done
				 *  used to initiate r5c_cache_data()
				 *  also used to bypass logic in handle_stripe
				 */
	STRIPE_R5C_CACHING,	/* the stripe is in caching phase
				 * see more detail in raid5-cache.c
				 */
	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
					 * in conf->r5c_partial_stripe_list)
					 */
	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
				 * in conf->r5c_full_stripe_list)
				 */
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
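
/*
 * Illustrative sketch: these flag bits live in sh->state, so a stripe that
 * is part of an expansion or has a sync requested can be detected with a
 * single mask test (do_special_handling() is a hypothetical stand-in):
 *
 *	if (sh->state & STRIPE_EXPAND_SYNC_FLAGS)
 *		do_special_handling(sh);
 */
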
/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
};

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until several write requests for the same stripe have had a
 * chance to be collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. the unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle, clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In handle_stripe, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send the stripe
 * to the delayed queue.
 * HANDLE gets cleared if handle_stripe leaves nothing locked.
 */
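
/*
 * Illustrative sketch of the decision described above (hypothetical,
 * simplified): when handle_stripe() finds that pre-reading a block is
 * needed, it either proceeds or defers the stripe:
 *
 *	if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 *		set_bit(R5_LOCKED, &dev->flags);
 *		set_bit(R5_Wantread, &dev->flags);
 *	} else {
 *		set_bit(STRIPE_DELAYED, &sh->state);
 *		set_bit(STRIPE_HANDLE, &sh->state);
 *	}
 */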

struct disk_info {
	struct md_rdev	*rdev, *replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8
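
/*
 * Illustrative sketch: a stripe's hash bucket is derived from its sector
 * using the masks above.  raid5.c picks the bucket with essentially:
 *
 *	#define stripe_hash(conf, sect) \
 *		(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
 */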

/* bios attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bios per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);

	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}
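
/*
 * Illustrative use of r5_next_bio() (hypothetical fragment,
 * handle_one_bio() is a stand-in): walking every bio attached to one
 * stripe+device, e.g. the towrite list, without running past this
 * device's sector range:
 *
 *	struct r5dev *dev = &sh->dev[i];
 *	struct bio *bio;
 *
 *	for (bio = dev->towrite; bio; bio = r5_next_bio(bio, dev->sector))
 *		handle_one_bio(bio);
 */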

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	atomic_set(segments, cnt);
}
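
/*
 * Illustrative sketch of how these accessors pair up (hypothetical,
 * simplified): the active count is raised once for each stripe the bio is
 * attached to, and the bio can only be completed when the last stripe
 * drops its reference:
 *
 *	raid5_inc_bi_active_stripes(bi);	// while attaching to a stripe
 *	...
 *	if (!raid5_dec_bi_active_stripes(bi))	// after a stripe is handled
 *		bio_endio(bi);
 */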

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
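
/*
 * Illustrative sketch: the hash-lock index of a stripe folds its NR_HASH
 * hash bucket onto one of the NR_STRIPE_HASH_LOCKS locks; raid5.c derives
 * it from the stripe's sector with essentially:
 *
 *	sh->hash_lock_index =
 *		((sh->sector >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK);
 */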

struct r5worker {
	struct work_struct work;
	struct r5worker_group *group;
	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool working;
};

struct r5worker_group {
	struct list_head handle_list;
	struct r5conf *conf;
	struct r5worker *workers;
	int stripes_cnt;
};

enum r5_cache_state {
	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
				 * waiting for 25% to be free
				 */
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
				 * more until at least one has been
				 * released.  This avoids flooding
				 * the cache.
				 */
	R5C_LOG_TIGHT,		/* log device space tight, need to
				 * prioritize stripes at last_checkpoint
				 */
	R5C_LOG_CRITICAL,	/* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * for prexor
				 */
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;

	/* reshape_progress is the leading edge of a 'reshape'
	 * It has value MaxSector when no reshape is happening
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape.  We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation; /* increments with every reshape */
	seqcount_t		gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices.  May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	hold_list; /* preread ready stripes */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delayed awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count; /* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy; /* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold; /* detect hold_list promotions */

	/* bios to have bi_end_io called after metadata is synced */
	struct bio_list		return_bi;

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page; /* Used when checking P/Q in raid6 */
		struct flex_array *scribble;   /* space for constructing buffer
					      * lists and performing address
					      * conversions
					      */
	} __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;
	struct hlist_node node;

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
};


/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}
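
/*
 * Illustrative use of the helpers above (hypothetical fragment, modeled on
 * the sort of validation done at array setup time):
 *
 *	if (mddev->level == 6 && !algorithm_valid_raid6(mddev->layout))
 *		return -EINVAL;
 *	if (mddev->level == 5 && !algorithm_valid_raid5(mddev->layout))
 *		return -EINVAL;
 */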

extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
extern void raid5_release_stripe(struct stripe_head *sh);
extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce);
extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
extern void r5l_exit_log(struct r5l_log *log);
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
extern void r5l_write_stripe_run(struct r5l_log *log);
extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
extern void r5l_quiesce(struct r5l_log *log, int state);
extern bool r5l_log_disk_error(struct r5conf *conf);
extern bool r5c_is_writeback(struct r5l_log *log);
extern int
r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
		      struct stripe_head_state *s, int disks);
extern void
r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
			    struct stripe_head_state *s);
extern void r5c_release_extra_page(struct stripe_head *sh);
extern void r5c_use_extra_page(struct stripe_head *sh);
extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
extern void r5c_handle_cached_data_endio(struct r5conf *conf,
	struct stripe_head *sh, int disks, struct bio_list *return_bi);
extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
			  struct stripe_head_state *s);
extern void r5c_make_stripe_write_out(struct stripe_head *sh);
extern void r5c_flush_cache(struct r5conf *conf, int num);
extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
extern void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
#endif