xref: /openbmc/linux/drivers/md/raid5.h (revision fd5e9fccbd504c5179ab57ff695c610bca8809d6)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
#include <linux/local_lock.h>

/*
 *
 * Each stripe contains one buffer per device.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) for read requests, one (bh_write) for write.
 * There should never be more than one buffer on the two lists
 * together, but we are not guaranteed of that so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos.  The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time.  It can be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are fifo queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     setSTRIPE_ACTIVE,  clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if  STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity dependent operations like writes and compute_blocks
 *    from starting while a check is in progress.  Some dma engines can perform
 *    the check without damaging the parity block; in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date.  This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 *    that block as if it is up to date.  raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */

/*
 * Operations state - intermediate states that are visible outside of
 *   STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  For simple operations like biofill and
 * compute that only have an _idle and _run state they are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - xor parity check operation is running
 * @check_state_run_q - q-parity check operation is running
 * @check_state_run_pq - pq dual parity check operation is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run, /* xor parity check */
	check_state_run_q, /* q-parity check */
	check_state_run_pq, /* pq dual parity check */
	check_state_check_result,
	check_state_compute_run, /* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};
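
/*
 * Illustrative mapping (hypothetical helper, not part of raid5.c): per
 * the inline comments above, a read-modify-write takes the prexor+drain
 * path while a reconstruct-write takes the plain drain path; expansion
 * uses reconstruct_state_run.
 */
static inline enum reconstruct_states reconstruct_state_for_write(bool rmw)
{
	return rmw ? reconstruct_state_prexor_drain_run :
		     reconstruct_state_drain_run;
}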

#define DEFAULT_STRIPE_SIZE	4096
struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;	      /* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;	      /* nr of active threads/requests */
	int			bm_seq;	/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states reconstruct_state;
	spinlock_t		stripe_lock;
	int			cpu;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head; /* protected by stripe lock */
	spinlock_t		batch_lock; /* only header's lock is useful */
	struct list_head	batch_list; /* protected by head's batch lock */

	union {
		struct r5l_io_unit	*log_io;
		struct ppl_io_unit	*ppl_io;
	};

	struct list_head	log_list;
	sector_t		log_start; /* first meta block on the journal */
	struct list_head	r5c; /* for r5c_cache->stripe_in_journal */

	struct page		*ppl_page; /* partial parity of this stripe */
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 */
	struct stripe_operations {
		int 		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	/* These pages will be used by bios in dev[i] */
	struct page	**pages;
	int	nr_pages;	/* page array size */
	int	stripes_per_page;
#endif
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		unsigned int    offset;     /* offset of the page */
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;			/* sector of this page */
		unsigned long	flags;
		u32		log_checksum;
		unsigned short	write_hint;
	} dev[]; /* allocated depending on RAID geometry ("disks" member) */
};

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 *     for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int injournal, just_cached;
	int failed_num[2];
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int log_failed;
	int waiting_extra_page;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge, /* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the readerror */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
	R5_InJournal,	/* data being written is in the journal device.
			 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
	R5_OrigPageUPTDODATE,	/* with write back cache, we read old data into
				 * dev->orig_page for prexor. When this flag is
				 * set, orig_page contains latest data in the
				 * raid disk.
				 */
};
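
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): naming the four buffer states from the comment at the
 * top of this file, derived from the R5_UPTODATE and R5_LOCKED bits.
 */
static inline const char *r5dev_state_name(unsigned long flags)
{
	if (test_bit(R5_LOCKED, &flags))
		return test_bit(R5_UPTODATE, &flags) ?
			"Dirty" :	/* new data being written out */
			"Want";		/* read request in flight */
	return test_bit(R5_UPTODATE, &flags) ?
		"Clean" :		/* data matches what is on disc */
		"Empty";		/* no data, no active request */
}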

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
				 * this bit is used in two scenarios:
				 *
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
				 *  clear in second entry of r5l_write_stripe
				 *  used to bypass logic in handle_stripe
				 *
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
				 *  clear when journal write is done
				 *  used to initiate r5c_cache_data()
				 *  also used to bypass logic in handle_stripe
				 */
	STRIPE_R5C_CACHING,	/* the stripe is in caching phase
				 * see more detail in raid5-cache.c
				 */
	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
					 * in conf->r5c_partial_stripe_list)
					 */
	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
				 * in conf->r5c_full_stripe_list)
				 */
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};
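
/*
 * Illustrative helper (hypothetical, not in raid5.c): per the refcount
 * rules described at the top of this file, a stripe whose count has
 * dropped to zero belongs on the handle_list iff STRIPE_HANDLE is set,
 * and on the inactive_list otherwise.
 */
static inline bool stripe_wants_handling(struct stripe_head *sh)
{
	return atomic_read(&sh->count) == 0 &&
	       test_bit(STRIPE_HANDLE, &sh->state);
}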

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
	STRIPE_OP_PARTIAL_PARITY,
};

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made. (the unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In stripe_handle, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
 * HANDLE gets cleared if stripe_handle leaves nothing locked.
 */

/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
 * There are three safe ways to access disk_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
 *    is called as part of performing resync/recovery/reshape.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
 *    lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if
 * it has been incremented, the pointer is put back in .rdev.
 */

struct disk_info {
	struct md_rdev	__rcu *rdev;
	struct md_rdev  __rcu *replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};
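
/*
 * Minimal sketch of access rule 3/ above (simplified from patterns used
 * in raid5.c; the helper name is hypothetical): dereference .rdev under
 * rcu_read_lock() and pin it via nr_pending before dropping the lock.
 */
static inline struct md_rdev *r5_pin_rdev(struct disk_info *info)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(info->rdev);
	if (rdev)
		atomic_inc(&rdev->nr_pending);
	rcu_read_unlock();
	return rdev;	/* caller must eventually drop nr_pending */
}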

/*
 * Stripe cache
 */

#define NR_STRIPES		256

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#endif

#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)

struct r5worker {
	struct work_struct work;
	struct r5worker_group *group;
	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool working;
};

struct r5worker_group {
	struct list_head handle_list;
	struct list_head loprio_list;
	struct r5conf *conf;
	struct r5worker *workers;
	int stripes_cnt;
};

/*
 * r5c journal modes of the array: write-back or write-through.
 * write-through mode has identical behavior to the existing log-only
 * implementation.
 */
enum r5c_journal_mode {
	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};

enum r5_cache_state {
	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
				 * waiting for 25% to be free
				 */
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
				 * more until at least one has been
				 * released.  This avoids flooding
				 * the cache.
				 */
	R5C_LOG_TIGHT,		/* log device space tight, need to
				 * prioritize stripes at last_checkpoint
				 */
	R5C_LOG_CRITICAL,	/* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * for prexor
				 */
};

#define PENDING_IO_MAX 512
#define PENDING_IO_ONE_FLUSH 128
struct r5pending_data {
	struct list_head sibling;
	sector_t sector; /* stripe sector */
	struct bio_list bios;
};

struct raid5_percpu {
	struct page	*spare_page; /* Used when checking P/Q in raid6 */
	void		*scribble;  /* space for constructing buffer
				     * lists and performing address
				     * conversions
				     */
	int             scribble_obj_size;
	local_lock_t    lock;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	unsigned long	stripe_size;
	unsigned int	stripe_shift;
	unsigned long	stripe_sectors;
#endif

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape.  We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation; /* increments with every reshape */
	seqcount_spinlock_t	gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices.  May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	loprio_list; /* low priority stripes */
	struct list_head	hold_list; /* preread ready stripes */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delaying awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	unsigned int		retry_read_offset; /* sector offset into retry_read_aligned */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count; /* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy; /* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold; /* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;
	struct hlist_node node;

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
	struct bio_set		bio_split;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread __rcu	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
	void			*log_private;

	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
	int			pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define RAID5_STRIPE_SIZE(conf)	STRIPE_SIZE
#define RAID5_STRIPE_SHIFT(conf)	STRIPE_SHIFT
#define RAID5_STRIPE_SECTORS(conf)	STRIPE_SECTORS
#else
#define RAID5_STRIPE_SIZE(conf)	((conf)->stripe_size)
#define RAID5_STRIPE_SHIFT(conf)	((conf)->stripe_shift)
#define RAID5_STRIPE_SECTORS(conf)	((conf)->stripe_sectors)
#endif

/* bios attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bios per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
{
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}
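
/*
 * Usage sketch (illustrative only, not part of the driver): walking the
 * bios attached to one stripe+device with r5_next_bio(), e.g. a
 * dev->towrite chain, stopping before any bio that extends past this
 * device.
 */
static inline int r5_count_bios(struct r5conf *conf, struct bio *chain,
				sector_t dev_sector)
{
	int n = 0;

	while (chain && chain->bi_iter.bi_sector <
	       dev_sector + RAID5_STRIPE_SECTORS(conf)) {
		n++;
		chain = r5_next_bio(conf, chain, dev_sector);
	}
	return n;
}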

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is
 * computed is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}
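
/*
 * Illustrative sketch (simplified from raid5_compute_sector() in
 * raid5.c; the helper name is hypothetical): with
 * ALGORITHM_LEFT_ASYMMETRIC the parity disk starts at the last device
 * and rotates down by one position per stripe.
 */
static inline int left_asymmetric_pd_idx(sector_t stripe, int raid_disks)
{
	/* sector_div() divides in place and returns the remainder */
	return (raid_disks - 1) - (int)sector_div(stripe, raid_disks);
}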

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
/*
 * Return offset of the corresponding page for r5dev.
 */
static inline int raid5_get_page_offset(struct stripe_head *sh, int disk_idx)
{
	return (disk_idx % sh->stripes_per_page) * RAID5_STRIPE_SIZE(sh->raid_conf);
}

/*
 * Return corresponding page address for r5dev.
 */
static inline struct page *
raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
{
	return sh->pages[disk_idx / sh->stripes_per_page];
}
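
/*
 * Usage sketch (hypothetical helper): combining the two accessors above
 * to reach the data of dev[disk_idx] when several stripes share one
 * page.  Assumes the page is lowmem so page_address() is valid.
 */
static inline void *raid5_get_dev_data(struct stripe_head *sh, int disk_idx)
{
	return page_address(raid5_get_dev_page(sh, disk_idx)) +
		raid5_get_page_offset(sh, disk_idx);
}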
#endif

void md_raid5_kick_device(struct r5conf *conf);
int raid5_set_cache_size(struct mddev *mddev, int size);
sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
void raid5_release_stripe(struct stripe_head *sh);
sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
		int previous, int *dd_idx, struct stripe_head *sh);

struct stripe_request_ctx;
/* get stripe from previous generation (when reshaping) */
#define R5_GAS_PREVIOUS		(1 << 0)
/* do not block waiting for a free stripe */
#define R5_GAS_NOBLOCK		(1 << 1)
/* do not block waiting for quiesce to be released */
#define R5_GAS_NOQUIESCE	(1 << 2)
struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
		struct stripe_request_ctx *ctx, sector_t sector,
		unsigned int flags);

int raid5_calc_degraded(struct r5conf *conf);
int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif