#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>

/*
 * Each stripe contains one buffer per disc.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under a per-stripe
 * spinlock.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by the spin lock.
 *
 * The flag bits that are used to represent these states are:
 *	R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *	We have no data, and there is no active request
 * State Want  == !UPTODATE, LOCK
 *	A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *	Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *	We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *	Empty -> Want	- on read or write to get old data for parity calc
 *	Empty -> Dirty	- on compute_parity to satisfy write/sync request (RECONSTRUCT_WRITE)
 *	Empty -> Clean	- on compute_block when computing a block for failed drive
 *	Want  -> Empty	- on failed read
 *	Want  -> Clean	- on successful completion of read request
 *	Dirty -> Clean	- on successful completion of write request
 *	Dirty -> Clean	- on failed write
 *	Clean -> Dirty	- on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *	Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) is for read requests, one (bh_write) for writes.
 * There should never be more than one buffer on the two lists
 * together, but we are not guaranteed of that so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 */
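
/*
 * Illustrative sketch (not part of the original header): how the four
 * buffer states above decode from the two flag bits.  Plain ints are
 * used so the example stands alone; the driver itself tests the
 * R5_UPTODATE and R5_LOCKED bits (defined below) in dev->flags with
 * test_bit().
 */
static inline const char *example_buffer_state(int uptodate, int locked)
{
	if (!uptodate)
		return locked ? "Want" : "Empty";	/* read pending : no data */
	return locked ? "Dirty" : "Clean";		/* write-out : matches disc */
}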
/*
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as FIFOs.  The read list is
 * protected by the device_lock.  The write and written lists are
 * protected by the stripe lock.  The device_lock, which can be
 * claimed while the stripe lock is held, is only for list
 * manipulations and will only be held for a very short time.  It can
 * be claimed from interrupts.
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are FIFO queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all
 * protected by the device_lock.
 *  - stripes on the inactive_list never have their stripe_lock held.
 *  - stripes have a reference counter.  If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list, and if refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *	lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *	lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *	lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *	lockstripe clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed unlockstripe schedule io/ops
 *  release an active stripe (release_stripe())
 *	lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 */
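
/*
 * Sketch of the release_stripe() rule above (a simplified
 * illustration; the real function lives in raid5.c): with device_lock
 * held, a stripe whose refcount falls to zero goes on handle_list
 * when STRIPE_HANDLE is set, else on inactive_list.  handle_needed
 * stands in for test_bit(STRIPE_HANDLE, &sh->state), since
 * STRIPE_HANDLE is only defined further down in this header.
 */
static inline void example_release_stripe(struct list_head *lru,
					  atomic_t *count, int handle_needed,
					  struct list_head *handle_list,
					  struct list_head *inactive_list)
{
	/* caller holds device_lock */
	if (atomic_dec_and_test(count)) {
		if (handle_needed)
			list_add_tail(lru, handle_list);
		else
			list_add_tail(lru, inactive_list);
	}
}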
/*
 * Stripe operations are performed outside the stripe lock.
 * The stripe operations are:
 *  -copying data between the stripe cache and user application buffers
 *  -computing blocks to save a disk access, or to recover a missing block
 *  -updating the parity on a write operation (reconstruct write and
 *   read-modify-write)
 *  -checking parity correctness
 *  -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent
 * some from being requested while another is in flight:
 * 1/ Parity check operations destroy the in-cache version of the parity
 *    block, so we prevent parity-dependent operations like writes and
 *    compute_blocks from starting while a check is in progress.  Some dma
 *    engines can perform the check without damaging the parity block; in
 *    these cases the parity block is re-marked up to date (assuming the
 *    check was successful) and is not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date.  This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 *    that block as if it is up to date.  raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated
 *    after the compute block completes.
 */
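
/*
 * Sketch of the request/dispatch handshake just described
 * (illustrative only): handle_stripe records the wanted operation as
 * a bit in an ops_request word and takes a reference; raid5_run_ops
 * then tests each bit and issues the corresponding async_tx calls
 * while the count is non-zero.  op is one of the STRIPE_OP_* bit
 * numbers defined further down in this header.
 */
static inline void example_request_op(unsigned long *ops_request, int op,
				      atomic_t *count)
{
	set_bit(op, ops_request);	/* e.g. STRIPE_OP_COMPUTE_BLK */
	atomic_inc(count);		/* dropped when the operation completes */
}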
/*
 * Operations state - intermediate states that are visible outside of
 * sh->lock.  In general _idle indicates nothing is running, _run
 * indicates a data processing operation is active, and _result means
 * the data processing result is stable and can be acted upon.  For
 * simple operations like biofill and compute that only have an _idle
 * and _run state they are indicated with sh->state flags
 * (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - check operation is running
 * @check_state_run_q - q-parity check operation is running
 * @check_state_run_pq - pq dual parity check operation is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run,		/* xor parity check */
	check_state_run_q,		/* q-parity check */
	check_state_run_pq,		/* pq dual parity check */
	check_state_check_result,
	check_state_compute_run,	/* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;		/* inactive_list or handle_list */
	struct raid5_private_data *raid_conf;
	short			generation;	/* increments with every reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	unsigned long		state;		/* state flags */
	atomic_t		count;		/* nr of active thread/requests */
	spinlock_t		lock;
	int			bm_seq;		/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	enum check_states	check_state;
	enum reconstruct_states	reconstruct_state;
	/* stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;
	struct r5dev {
		struct bio	req;
		struct bio_vec	vec;
		struct page	*page;
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
	} dev[1];	/* allocated with extra space depending on RAID geometry */
};
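
/*
 * The dev[1] trailing array means a stripe_head is over-allocated so
 * that it holds one r5dev per member disk.  A sketch of the size
 * calculation (grow_stripes() in raid5.c sizes its kmem_cache the
 * same way):
 */
static inline size_t example_stripe_head_size(int disks)
{
	/* one r5dev is already included in sizeof(struct stripe_head) */
	return sizeof(struct stripe_head) +
		(disks - 1) * sizeof(struct r5dev);
}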
/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe.  It is only valid under spin_lock(sh->lock);
 */
struct stripe_head_state {
	int syncing, expanding, expanded;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int failed_num;
	unsigned long ops_request;
};

/* r6_state - extra state data only relevant to r6 */
struct r6_state {
	int p_failed, q_failed, failed_num[2];
};

/* Flags */
#define	R5_UPTODATE	0	/* page contains current data */
#define	R5_LOCKED	1	/* IO has been submitted on "req" */
#define	R5_OVERWRITE	2	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
#define	R5_Insync	3	/* rdev && rdev->in_sync at start */
#define	R5_Wantread	4	/* want to schedule a read */
#define	R5_Wantwrite	5
#define	R5_Overlap	7	/* There is a pending overlapping request
				 * on this block */
#define	R5_ReadError	8	/* seen a read error here recently */
#define	R5_ReWrite	9	/* have tried to over-write the readerror */

#define	R5_Expanded	10	/* This block now has post-expand data */
#define	R5_Wantcompute	11	/* compute_block in progress treat as
				 * uptodate
				 */
#define	R5_Wantfill	12	/* dev->toread contains a bio that needs
				 * filling
				 */
#define	R5_Wantdrain	13	/* dev->towrite needs to be drained */
/*
 * Write method
 */
#define RECONSTRUCT_WRITE	1
#define READ_MODIFY_WRITE	2
/* not a write method, but a compute_parity mode */
#define	CHECK_PARITY		3
/* Additional compute_parity mode -- updates the parity w/o LOCKING */
#define UPDATE_PARITY		4

/*
 * Stripe state
 */
#define STRIPE_HANDLE		2
#define	STRIPE_SYNCING		3
#define	STRIPE_INSYNC		4
#define	STRIPE_PREREAD_ACTIVE	5
#define	STRIPE_DELAYED		6
#define	STRIPE_DEGRADED		7
#define	STRIPE_BIT_DELAY	8
#define	STRIPE_EXPANDING	9
#define	STRIPE_EXPAND_SOURCE	10
#define	STRIPE_EXPAND_READY	11
#define	STRIPE_IO_STARTED	12	/* do not count towards 'bypass_count' */
#define	STRIPE_FULL_WRITE	13	/* all blocks are set to be overwritten */
#define	STRIPE_BIOFILL_RUN	14
#define	STRIPE_COMPUTE_RUN	15
/*
 * Operation request flags
 */
#define STRIPE_OP_BIOFILL	0
#define STRIPE_OP_COMPUTE_BLK	1
#define STRIPE_OP_PREXOR	2
#define STRIPE_OP_BIODRAIN	3
#define STRIPE_OP_RECONSTRUCT	4
#define STRIPE_OP_CHECK		5
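
/*
 * Note that the R5_* and STRIPE_* values above are bit numbers, not
 * masks: they are manipulated with set_bit()/clear_bit()/test_bit()
 * on dev->flags and sh->state.  An illustrative sketch (not driver
 * code) of the Empty -> Want transition from the state table at the
 * top of this file:
 */
static inline void example_start_read(struct r5dev *dev)
{
	set_bit(R5_LOCKED, &dev->flags);	/* Empty -> Want */
	/*
	 * ... the read is then submitted; on completion the end_io
	 * path sets R5_UPTODATE (Want -> Clean) or leaves it clear
	 * (Want -> Empty) before R5_LOCKED is released.
	 */
}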
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. until unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and
 * set PREREAD_ACTIVE.
 * In handle_stripe, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the
 * delayed queue.
 * HANDLE gets cleared if handle_stripe leaves nothing locked.
 */


struct disk_info {
	mdk_rdev_t	*rdev;
};

struct raid5_private_data {
	struct hlist_head	*stripe_hashtbl;
	mddev_t			*mddev;
	struct disk_info	*spare;
	int			chunk_sectors;
	int			level, algorithm;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape.  We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation;	/* increments with every reshape */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */

	struct list_head	handle_list;	/* stripes needing handling */
	struct list_head	hold_list;	/* preread ready stripes */
	struct list_head	delayed_list;	/* stripes that have plugged requests */
	struct list_head	bitmap_list;	/* stripes delayed awaiting a bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count;	/* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	struct list_head	*last_hold;	/* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][20];
	struct kmem_cache	*slab_cache;	/* for allocating stripes */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;	/* set to 1 if a full sync is needed
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page;	/* Used when checking P/Q in raid6 */
		void		*scribble;	/* space for constructing buffer
						 * lists and performing address
						 * conversions
						 */
	} *percpu;
	size_t			scribble_len;	/* size of scribble region; must be
						 * associated with conf to handle
						 * cpu hotplug while reshaping
						 */
#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block	cpu_notify;
#endif

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	int			inactive_blocked;	/* release of inactive stripes blocked,
							 * waiting for 25% to be free
							 */
	int			pool_size;	/* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct mdk_thread_s	*thread;
};
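
/*
 * Simplified sketch of the delayed-queue promotion described in the
 * "Plugging" comment above (illustrative only; the real code is
 * raid5_activate_delayed() in raid5.c, which additionally throttles
 * on the preread count and stages stripes via hold_list):
 */
static inline void example_activate_delayed(struct raid5_private_data *conf)
{
	/* caller holds device_lock */
	while (!list_empty(&conf->delayed_list)) {
		struct stripe_head *sh;

		sh = list_entry(conf->delayed_list.next,
				struct stripe_head, lru);
		list_del_init(&sh->lru);
		clear_bit(STRIPE_DELAYED, &sh->state);
		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			atomic_inc(&conf->preread_active_stripes);
		list_add_tail(&sh->lru, &conf->handle_list);
	}
}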
typedef struct raid5_private_data raid5_conf_t;

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is
 * computed is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */


/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6.
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout == 8 || layout == 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}
#endif
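
/*
 * Worked example (an illustrative sketch, not part of the original
 * header): for the rotating layouts above, the parity disk of a
 * given stripe can be computed directly.  This mirrors the
 * ALGORITHM_LEFT_ASYMMETRIC case of raid5_compute_sector() in
 * raid5.c (which uses sector_div() for 64-bit safety).  With
 * raid_disks == 5, stripes 0,1,2,... place parity on disks
 * 4,3,2,1,0,4,...
 */
static inline int example_left_asymmetric_pd_idx(unsigned long stripe,
						 int raid_disks)
{
	/* parity starts on the last disk and rotates toward disk 0 */
	return (raid_disks - 1) - (int)(stripe % raid_disks);
}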