xref: /openbmc/linux/drivers/md/raid5-ppl.c (revision d8bcaabe)
1 /*
2  * Partial Parity Log for closing the RAID5 write hole
3  * Copyright (c) 2017, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/blkdev.h>
17 #include <linux/slab.h>
18 #include <linux/crc32c.h>
19 #include <linux/flex_array.h>
20 #include <linux/async_tx.h>
21 #include <linux/raid/md_p.h>
22 #include "md.h"
23 #include "raid5.h"
24 
25 /*
26  * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
27  * partial parity data. The header contains an array of entries
28  * (struct ppl_header_entry) which describe the logged write requests.
29  * Partial parity for the entries comes after the header, written in the same
30  * sequence as the entries:
31  *
32  * Header
33  *   entry0
34  *   ...
35  *   entryN
36  * PP data
37  *   PP for entry0
38  *   ...
39  *   PP for entryN
40  *
41  * An entry describes one or more consecutive stripe_heads, up to a full
42  * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
43  * number of stripe_heads in the entry and n is the number of modified data
44  * disks. Every stripe_head in the entry must write to the same data disks.
45  * An example of a valid case described by a single entry (writes to the first
46  * stripe of a 4 disk array, 16k chunk size):
47  *
48  * sh->sector   dd0   dd1   dd2    ppl
49  *            +-----+-----+-----+
50  * 0          | --- | --- | --- | +----+
51  * 8          | -W- | -W- | --- | | pp |   data_sector = 8
52  * 16         | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
53  * 24         | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
54  *            +-----+-----+-----+ +----+
55  *
56  * data_sector is the first raid sector of the modified data, data_size is the
57  * total size of modified data and pp_size is the size of partial parity for
58  * this entry. Entries for full stripe writes contain no partial parity
59  * (pp_size = 0); they only mark the stripes for which parity should be
60  * recalculated after an unclean shutdown. Every entry holds a checksum of its
61  * partial parity; the header also has a checksum of the header itself.
62  *
63  * A write request is always logged to the PPL instance stored on the parity
64  * disk of the corresponding stripe. For each member disk there is one ppl_log
65  * used to handle logging for this disk, independently of the others. They are
66  * grouped in child_logs array in struct ppl_conf, which is assigned to
67  * r5conf->log_private.
68  *
69  * ppl_io_unit represents a full PPL write; its header_page contains the ppl_header.
70  * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
71  * can be appended to the last entry if it meets the conditions for a valid
72  * entry described above, otherwise a new entry is added. Checksums of entries
73  * are calculated incrementally as stripes containing partial parity are being
74  * added. ppl_submit_iounit() calculates the checksum of the header and submits
75  * a bio containing the header page and partial parity pages (sh->ppl_page) for
76  * all stripes of the io_unit. When the PPL write completes, the stripes
77  * associated with the io_unit are released and raid5d starts writing their data
78  * and parity. When all stripes are written, the io_unit is freed and the next
79  * can be submitted.
80  *
81  * An io_unit is used to gather stripes until it is submitted or becomes full
82  * (if the maximum number of entries or size of PPL is reached). Another io_unit
83  * can't be submitted until the previous has completed (PPL and stripe
84  * data+parity is written). The log->io_list tracks all io_units of a log
85  * (for a single member disk). New io_units are added to the end of the list
86  * and the first io_unit is submitted, if it is not submitted already.
87  * The current io_unit accepting new stripes is always at the end of the list.
88  */
89 
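/*
 * Partial parity space used by a single io_unit when the PPL area on a member
 * disk is big enough to hold more than one PPL (log->use_multippl); see
 * ppl_init_child_log(), which sets log->entry_space accordingly.
 */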
90 #define PPL_SPACE_SIZE (128 * 1024)
91 
92 struct ppl_conf {
93 	struct mddev *mddev;
94 
95 	/* array of child logs, one for each raid disk */
96 	struct ppl_log *child_logs;
97 	int count;
98 
99 	int block_size;		/* the logical block size used for data_sector
100 				 * in ppl_header_entry */
101 	u32 signature;		/* raid array identifier */
102 	atomic64_t seq;		/* current log write sequence number */
103 
104 	struct kmem_cache *io_kc;
105 	mempool_t *io_pool;
106 	struct bio_set *bs;
107 
108 	/* used only for recovery */
109 	int recovered_entries;
110 	int mismatch_count;
111 
112 	/* stripes to retry if failed to allocate io_unit */
113 	struct list_head no_mem_stripes;
114 	spinlock_t no_mem_stripes_lock;
115 };
116 
117 struct ppl_log {
118 	struct ppl_conf *ppl_conf;	/* shared between all log instances */
119 
120 	struct md_rdev *rdev;		/* array member disk associated with
121 					 * this log instance */
122 	struct mutex io_mutex;
123 	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
124 					 * always at the end of io_list */
125 	spinlock_t io_list_lock;
126 	struct list_head io_list;	/* all io_units of this log */
127 
128 	sector_t next_io_sector;
129 	unsigned int entry_space;
130 	bool use_multippl;
131 };
132 
133 #define PPL_IO_INLINE_BVECS 32
134 
135 struct ppl_io_unit {
136 	struct ppl_log *log;
137 
138 	struct page *header_page;	/* for ppl_header */
139 
140 	unsigned int entries_count;	/* number of entries in ppl_header */
141 	unsigned int pp_size;		/* current total size of partial parity */
142 
143 	u64 seq;			/* sequence number of this log write */
144 	struct list_head log_sibling;	/* log->io_list */
145 
146 	struct list_head stripe_list;	/* stripes added to the io_unit */
147 	atomic_t pending_stripes;	/* how many stripes not written to raid */
148 
149 	bool submitted;			/* true if write to log started */
150 
151 	/* inline bio and its biovec for submitting the iounit */
152 	struct bio bio;
153 	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
154 };
155 
156 struct dma_async_tx_descriptor *
157 ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
158 		       struct dma_async_tx_descriptor *tx)
159 {
160 	int disks = sh->disks;
161 	struct page **srcs = flex_array_get(percpu->scribble, 0);
162 	int count = 0, pd_idx = sh->pd_idx, i;
163 	struct async_submit_ctl submit;
164 
165 	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
166 
167 	/*
168 	 * Partial parity is the XOR of stripe data chunks that are not changed
169 	 * during the write request. Depending on available data
170 	 * (read-modify-write vs. reconstruct-write case) we calculate it
171 	 * differently.
172 	 */
173 	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
174 		/*
175 		 * rmw: xor old data and parity from updated disks
176 		 * This is calculated earlier by ops_run_prexor5() so just copy
177 		 * the parity dev page.
178 		 */
179 		srcs[count++] = sh->dev[pd_idx].page;
180 	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
181 		/* rcw: xor data from all not updated disks */
182 		for (i = disks; i--;) {
183 			struct r5dev *dev = &sh->dev[i];
184 			if (test_bit(R5_UPTODATE, &dev->flags))
185 				srcs[count++] = dev->page;
186 		}
187 	} else {
188 		return tx;
189 	}
190 
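	/*
	 * Pass the address conversion area of the per-cpu scribble buffer to
	 * async_tx; it follows the (disks + 2) source page pointers, the same
	 * layout as to_addr_conv() in raid5.c.
	 */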
191 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
192 			  NULL, sh, flex_array_get(percpu->scribble, 0)
193 			  + sizeof(struct page *) * (sh->disks + 2));
194 
195 	if (count == 1)
196 		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
197 				  &submit);
198 	else
199 		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
200 			       &submit);
201 
202 	return tx;
203 }
204 
205 static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
206 {
207 	struct kmem_cache *kc = pool_data;
208 	struct ppl_io_unit *io;
209 
210 	io = kmem_cache_alloc(kc, gfp_mask);
211 	if (!io)
212 		return NULL;
213 
214 	io->header_page = alloc_page(gfp_mask);
215 	if (!io->header_page) {
216 		kmem_cache_free(kc, io);
217 		return NULL;
218 	}
219 
220 	return io;
221 }
222 
223 static void ppl_io_pool_free(void *element, void *pool_data)
224 {
225 	struct kmem_cache *kc = pool_data;
226 	struct ppl_io_unit *io = element;
227 
228 	__free_page(io->header_page);
229 	kmem_cache_free(kc, io);
230 }
231 
232 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
233 					  struct stripe_head *sh)
234 {
235 	struct ppl_conf *ppl_conf = log->ppl_conf;
236 	struct ppl_io_unit *io;
237 	struct ppl_header *pplhdr;
238 	struct page *header_page;
239 
240 	io = mempool_alloc(ppl_conf->io_pool, GFP_NOWAIT);
241 	if (!io)
242 		return NULL;
243 
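	/*
	 * header_page was allocated together with the io_unit by
	 * ppl_io_pool_alloc(), so preserve it across the memset below.
	 */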
244 	header_page = io->header_page;
245 	memset(io, 0, sizeof(*io));
246 	io->header_page = header_page;
247 
248 	io->log = log;
249 	INIT_LIST_HEAD(&io->log_sibling);
250 	INIT_LIST_HEAD(&io->stripe_list);
251 	atomic_set(&io->pending_stripes, 0);
252 	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
253 
254 	pplhdr = page_address(io->header_page);
255 	clear_page(pplhdr);
256 	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
257 	pplhdr->signature = cpu_to_le32(ppl_conf->signature);
258 
259 	io->seq = atomic64_add_return(1, &ppl_conf->seq);
260 	pplhdr->generation = cpu_to_le64(io->seq);
261 
262 	return io;
263 }
264 
265 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
266 {
267 	struct ppl_io_unit *io = log->current_io;
268 	struct ppl_header_entry *e = NULL;
269 	struct ppl_header *pplhdr;
270 	int i;
271 	sector_t data_sector = 0;
272 	int data_disks = 0;
273 	struct r5conf *conf = sh->raid_conf;
274 
275 	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);
276 
277 	/* check if current io_unit is full */
278 	if (io && (io->pp_size == log->entry_space ||
279 		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
280 		pr_debug("%s: add io_unit blocked by seq: %llu\n",
281 			 __func__, io->seq);
282 		io = NULL;
283 	}
284 
285 	/* add a new unit if there is none or the current is full */
286 	if (!io) {
287 		io = ppl_new_iounit(log, sh);
288 		if (!io)
289 			return -ENOMEM;
290 		spin_lock_irq(&log->io_list_lock);
291 		list_add_tail(&io->log_sibling, &log->io_list);
292 		spin_unlock_irq(&log->io_list_lock);
293 
294 		log->current_io = io;
295 	}
296 
297 	for (i = 0; i < sh->disks; i++) {
298 		struct r5dev *dev = &sh->dev[i];
299 
300 		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
301 			if (!data_disks || dev->sector < data_sector)
302 				data_sector = dev->sector;
303 			data_disks++;
304 		}
305 	}
306 	BUG_ON(!data_disks);
307 
308 	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
309 		 io->seq, (unsigned long long)data_sector, data_disks);
310 
311 	pplhdr = page_address(io->header_page);
312 
313 	if (io->entries_count > 0) {
314 		struct ppl_header_entry *last =
315 				&pplhdr->entries[io->entries_count - 1];
316 		struct stripe_head *sh_last = list_last_entry(
317 				&io->stripe_list, struct stripe_head, log_list);
318 		u64 data_sector_last = le64_to_cpu(last->data_sector);
319 		u32 data_size_last = le32_to_cpu(last->data_size);
320 
321 		/*
322 		 * Check if we can append the stripe to the last entry. It must
323 		 * be just after the last logged stripe and write to the same
324 		 * disks. Use bit shift and logarithm to avoid 64-bit division.
325 		 */
326 		if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
327 		    (data_sector >> ilog2(conf->chunk_sectors) ==
328 		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
329 		    ((data_sector - data_sector_last) * data_disks ==
330 		     data_size_last >> 9))
331 			e = last;
332 	}
333 
334 	if (!e) {
335 		e = &pplhdr->entries[io->entries_count++];
336 		e->data_sector = cpu_to_le64(data_sector);
337 		e->parity_disk = cpu_to_le32(sh->pd_idx);
338 		e->checksum = cpu_to_le32(~0);
339 	}
340 
341 	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);
342 
343 	/* don't write any PP if full stripe write */
344 	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
345 		le32_add_cpu(&e->pp_size, PAGE_SIZE);
346 		io->pp_size += PAGE_SIZE;
347 		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
348 						    page_address(sh->ppl_page),
349 						    PAGE_SIZE));
350 	}
351 
352 	list_add_tail(&sh->log_list, &io->stripe_list);
353 	atomic_inc(&io->pending_stripes);
354 	sh->ppl_io = io;
355 
356 	return 0;
357 }
358 
359 int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
360 {
361 	struct ppl_conf *ppl_conf = conf->log_private;
362 	struct ppl_io_unit *io = sh->ppl_io;
363 	struct ppl_log *log;
364 
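	/*
	 * Don't log the stripe if it already has an io_unit, is being synced,
	 * has no partial parity page, or the parity disk is not being written
	 * or is not in sync; returning -EAGAIN makes the caller write the
	 * stripe out immediately, without logging.
	 */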
365 	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
366 	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
367 	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
368 		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
369 		return -EAGAIN;
370 	}
371 
372 	log = &ppl_conf->child_logs[sh->pd_idx];
373 
374 	mutex_lock(&log->io_mutex);
375 
376 	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
377 		mutex_unlock(&log->io_mutex);
378 		return -EAGAIN;
379 	}
380 
381 	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
382 	clear_bit(STRIPE_DELAYED, &sh->state);
383 	atomic_inc(&sh->count);
384 
385 	if (ppl_log_stripe(log, sh)) {
386 		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
387 		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
388 		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
389 	}
390 
391 	mutex_unlock(&log->io_mutex);
392 
393 	return 0;
394 }
395 
396 static void ppl_log_endio(struct bio *bio)
397 {
398 	struct ppl_io_unit *io = bio->bi_private;
399 	struct ppl_log *log = io->log;
400 	struct ppl_conf *ppl_conf = log->ppl_conf;
401 	struct stripe_head *sh, *next;
402 
403 	pr_debug("%s: seq: %llu\n", __func__, io->seq);
404 
405 	if (bio->bi_status)
406 		md_error(ppl_conf->mddev, log->rdev);
407 
408 	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
409 		list_del_init(&sh->log_list);
410 
411 		set_bit(STRIPE_HANDLE, &sh->state);
412 		raid5_release_stripe(sh);
413 	}
414 }
415 
416 static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
417 {
418 	char b[BDEVNAME_SIZE];
419 
420 	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
421 		 __func__, io->seq, bio->bi_iter.bi_size,
422 		 (unsigned long long)bio->bi_iter.bi_sector,
423 		 bio_devname(bio, b));
424 
425 	submit_bio(bio);
426 }
427 
428 static void ppl_submit_iounit(struct ppl_io_unit *io)
429 {
430 	struct ppl_log *log = io->log;
431 	struct ppl_conf *ppl_conf = log->ppl_conf;
432 	struct ppl_header *pplhdr = page_address(io->header_page);
433 	struct bio *bio = &io->bio;
434 	struct stripe_head *sh;
435 	int i;
436 
437 	bio->bi_private = io;
438 
439 	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
440 		ppl_log_endio(bio);
441 		return;
442 	}
443 
444 	for (i = 0; i < io->entries_count; i++) {
445 		struct ppl_header_entry *e = &pplhdr->entries[i];
446 
447 		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
448 			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
449 			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));
450 
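		/*
		 * data_sector was accumulated in 512-byte sectors; convert it
		 * to logical blocks of ppl_conf->block_size. The checksum was
		 * seeded with ~0 in ppl_log_stripe(), invert it to get the
		 * final crc32c value.
		 */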
451 		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
452 					     ilog2(ppl_conf->block_size >> 9));
453 		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
454 	}
455 
456 	pplhdr->entries_count = cpu_to_le32(io->entries_count);
457 	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
458 
459 	/* Rewind the buffer if the current PPL is larger than the remaining space */
460 	if (log->use_multippl &&
461 	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
462 	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
463 		log->next_io_sector = log->rdev->ppl.sector;
464 
465 
466 	bio->bi_end_io = ppl_log_endio;
467 	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
468 	bio_set_dev(bio, log->rdev->bdev);
469 	bio->bi_iter.bi_sector = log->next_io_sector;
470 	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
471 
472 	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
473 	    (unsigned long long)log->next_io_sector);
474 
475 	if (log->use_multippl)
476 		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
477 
478 	list_for_each_entry(sh, &io->stripe_list, log_list) {
479 		/* entries for full stripe writes have no partial parity */
480 		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
481 			continue;
482 
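		/*
		 * If the page can't be added (e.g. all PPL_IO_INLINE_BVECS
		 * inline bvecs are used), allocate a new bio from the bio set,
		 * chain it to the previous one and submit the previous bio.
		 */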
483 		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
484 			struct bio *prev = bio;
485 
486 			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
487 					       ppl_conf->bs);
488 			bio->bi_opf = prev->bi_opf;
489 			bio_copy_dev(bio, prev);
490 			bio->bi_iter.bi_sector = bio_end_sector(prev);
491 			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
492 
493 			bio_chain(bio, prev);
494 			ppl_submit_iounit_bio(io, prev);
495 		}
496 	}
497 
498 	ppl_submit_iounit_bio(io, bio);
499 }
500 
501 static void ppl_submit_current_io(struct ppl_log *log)
502 {
503 	struct ppl_io_unit *io;
504 
505 	spin_lock_irq(&log->io_list_lock);
506 
507 	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
508 				      log_sibling);
509 	if (io && io->submitted)
510 		io = NULL;
511 
512 	spin_unlock_irq(&log->io_list_lock);
513 
514 	if (io) {
515 		io->submitted = true;
516 
517 		if (io == log->current_io)
518 			log->current_io = NULL;
519 
520 		ppl_submit_iounit(io);
521 	}
522 }
523 
524 void ppl_write_stripe_run(struct r5conf *conf)
525 {
526 	struct ppl_conf *ppl_conf = conf->log_private;
527 	struct ppl_log *log;
528 	int i;
529 
530 	for (i = 0; i < ppl_conf->count; i++) {
531 		log = &ppl_conf->child_logs[i];
532 
533 		mutex_lock(&log->io_mutex);
534 		ppl_submit_current_io(log);
535 		mutex_unlock(&log->io_mutex);
536 	}
537 }
538 
539 static void ppl_io_unit_finished(struct ppl_io_unit *io)
540 {
541 	struct ppl_log *log = io->log;
542 	struct ppl_conf *ppl_conf = log->ppl_conf;
543 	unsigned long flags;
544 
545 	pr_debug("%s: seq: %llu\n", __func__, io->seq);
546 
547 	local_irq_save(flags);
548 
549 	spin_lock(&log->io_list_lock);
550 	list_del(&io->log_sibling);
551 	spin_unlock(&log->io_list_lock);
552 
553 	mempool_free(io, ppl_conf->io_pool);
554 
555 	spin_lock(&ppl_conf->no_mem_stripes_lock);
556 	if (!list_empty(&ppl_conf->no_mem_stripes)) {
557 		struct stripe_head *sh;
558 
559 		sh = list_first_entry(&ppl_conf->no_mem_stripes,
560 				      struct stripe_head, log_list);
561 		list_del_init(&sh->log_list);
562 		set_bit(STRIPE_HANDLE, &sh->state);
563 		raid5_release_stripe(sh);
564 	}
565 	spin_unlock(&ppl_conf->no_mem_stripes_lock);
566 
567 	local_irq_restore(flags);
568 }
569 
570 void ppl_stripe_write_finished(struct stripe_head *sh)
571 {
572 	struct ppl_io_unit *io;
573 
574 	io = sh->ppl_io;
575 	sh->ppl_io = NULL;
576 
577 	if (io && atomic_dec_and_test(&io->pending_stripes))
578 		ppl_io_unit_finished(io);
579 }
580 
581 static void ppl_xor(int size, struct page *page1, struct page *page2)
582 {
583 	struct async_submit_ctl submit;
584 	struct dma_async_tx_descriptor *tx;
585 	struct page *xor_srcs[] = { page1, page2 };
586 
587 	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
588 			  NULL, NULL, NULL, NULL);
589 	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);
590 
591 	async_tx_quiesce(&tx);
592 }
593 
594 /*
595  * PPL recovery strategy: xor partial parity and data from all modified data
596  * disks within a stripe and write the result as the new stripe parity. If all
597  * stripe data disks are modified (full stripe write), no partial parity is
598  * available, so just xor the data disks.
599  *
600  * Recovery of a PPL entry shall occur only if all modified data disks are
601  * available and read from all of them succeeds.
602  *
603  * A PPL entry applies to a stripe, partial parity size for an entry is at most
604  * the size of the chunk. Examples of possible cases for a single entry:
605  *
606  * case 0: single data disk write:
607  *   data0    data1    data2     ppl        parity
608  * +--------+--------+--------+           +--------------------+
609  * | ------ | ------ | ------ | +----+    | (no change)        |
610  * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
611  * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
612  * | ------ | ------ | ------ | +----+    | (no change)        |
613  * +--------+--------+--------+           +--------------------+
614  * pp_size = data_size
615  *
616  * case 1: more than one data disk write:
617  *   data0    data1    data2     ppl        parity
618  * +--------+--------+--------+           +--------------------+
619  * | ------ | ------ | ------ | +----+    | (no change)        |
620  * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
621  * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
622  * | ------ | ------ | ------ | +----+    | (no change)        |
623  * +--------+--------+--------+           +--------------------+
624  * pp_size = data_size / modified_data_disks
625  *
626  * case 2: write to all data disks (also full stripe write):
627  *   data0    data1    data2                parity
628  * +--------+--------+--------+           +--------------------+
629  * | ------ | ------ | ------ |           | (no change)        |
630  * | -data- | -data- | -data- | --------> | xor all data       |
631  * | ------ | ------ | ------ | --------> | (no change)        |
632  * | ------ | ------ | ------ |           | (no change)        |
633  * +--------+--------+--------+           +--------------------+
634  * pp_size = 0
635  *
636  * The following cases are possible only in other implementations. The recovery
637  * code can handle them, but they are not generated at runtime because they can
638  * be reduced to cases 0, 1 and 2:
639  *
640  * case 3:
641  *   data0    data1    data2     ppl        parity
642  * +--------+--------+--------+ +----+    +--------------------+
643  * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
644  * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
645  * | -data- | -data- | -data- | | -- | -> | xor all data       |
646  * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
647  * +--------+--------+--------+ +----+    +--------------------+
648  * pp_size = chunk_size
649  *
650  * case 4:
651  *   data0    data1    data2     ppl        parity
652  * +--------+--------+--------+ +----+    +--------------------+
653  * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
654  * | ------ | ------ | ------ | | -- | -> | (no change)        |
655  * | ------ | ------ | ------ | | -- | -> | (no change)        |
656  * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
657  * +--------+--------+--------+ +----+    +--------------------+
658  * pp_size = chunk_size
659  */
660 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
661 			     sector_t ppl_sector)
662 {
663 	struct ppl_conf *ppl_conf = log->ppl_conf;
664 	struct mddev *mddev = ppl_conf->mddev;
665 	struct r5conf *conf = mddev->private;
666 	int block_size = ppl_conf->block_size;
667 	struct page *page1;
668 	struct page *page2;
669 	sector_t r_sector_first;
670 	sector_t r_sector_last;
671 	int strip_sectors;
672 	int data_disks;
673 	int i;
674 	int ret = 0;
675 	char b[BDEVNAME_SIZE];
676 	unsigned int pp_size = le32_to_cpu(e->pp_size);
677 	unsigned int data_size = le32_to_cpu(e->data_size);
678 
679 	page1 = alloc_page(GFP_KERNEL);
680 	page2 = alloc_page(GFP_KERNEL);
681 
682 	if (!page1 || !page2) {
683 		ret = -ENOMEM;
684 		goto out;
685 	}
686 
687 	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
688 
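	/*
	 * Work out the range of array sectors covered by this entry. For
	 * partial parity smaller than a chunk (cases 0-2 above) the number of
	 * modified data disks and the strip length follow from pp_size and
	 * data_size; otherwise (cases 3 and 4) the whole stripe is affected.
	 */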
689 	if ((pp_size >> 9) < conf->chunk_sectors) {
690 		if (pp_size > 0) {
691 			data_disks = data_size / pp_size;
692 			strip_sectors = pp_size >> 9;
693 		} else {
694 			data_disks = conf->raid_disks - conf->max_degraded;
695 			strip_sectors = (data_size >> 9) / data_disks;
696 		}
697 		r_sector_last = r_sector_first +
698 				(data_disks - 1) * conf->chunk_sectors +
699 				strip_sectors;
700 	} else {
701 		data_disks = conf->raid_disks - conf->max_degraded;
702 		strip_sectors = conf->chunk_sectors;
703 		r_sector_last = r_sector_first + (data_size >> 9);
704 	}
705 
706 	pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
707 		 (unsigned long long)r_sector_first,
708 		 (unsigned long long)r_sector_last);
709 
710 	/* if start and end are 4k aligned, use a 4k block */
711 	if (block_size == 512 &&
712 	    (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
713 	    (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
714 		block_size = STRIPE_SIZE;
715 
716 	/* iterate through blocks in strip */
717 	for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
718 		bool update_parity = false;
719 		sector_t parity_sector;
720 		struct md_rdev *parity_rdev;
721 		struct stripe_head sh;
722 		int disk;
723 		int indent = 0;
724 
725 		pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
726 		indent += 2;
727 
728 		memset(page_address(page1), 0, PAGE_SIZE);
729 
730 		/* iterate through data member disks */
731 		for (disk = 0; disk < data_disks; disk++) {
732 			int dd_idx;
733 			struct md_rdev *rdev;
734 			sector_t sector;
735 			sector_t r_sector = r_sector_first + i +
736 					    (disk * conf->chunk_sectors);
737 
738 			pr_debug("%s:%*s data member disk %d start\n",
739 				 __func__, indent, "", disk);
740 			indent += 2;
741 
742 			if (r_sector >= r_sector_last) {
743 				pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
744 					 __func__, indent, "",
745 					 (unsigned long long)r_sector);
746 				indent -= 2;
747 				continue;
748 			}
749 
750 			update_parity = true;
751 
752 			/* map raid sector to member disk */
753 			sector = raid5_compute_sector(conf, r_sector, 0,
754 						      &dd_idx, NULL);
755 			pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
756 				 __func__, indent, "",
757 				 (unsigned long long)r_sector, dd_idx,
758 				 (unsigned long long)sector);
759 
760 			rdev = conf->disks[dd_idx].rdev;
761 			if (!rdev) {
762 				pr_debug("%s:%*s data member disk %d missing\n",
763 					 __func__, indent, "", dd_idx);
764 				update_parity = false;
765 				break;
766 			}
767 
768 			pr_debug("%s:%*s reading data member disk %s sector %llu\n",
769 				 __func__, indent, "", bdevname(rdev->bdev, b),
770 				 (unsigned long long)sector);
771 			if (!sync_page_io(rdev, sector, block_size, page2,
772 					REQ_OP_READ, 0, false)) {
773 				md_error(mddev, rdev);
774 				pr_debug("%s:%*s read failed!\n", __func__,
775 					 indent, "");
776 				ret = -EIO;
777 				goto out;
778 			}
779 
780 			ppl_xor(block_size, page1, page2);
781 
782 			indent -= 2;
783 		}
784 
785 		if (!update_parity)
786 			continue;
787 
788 		if (pp_size > 0) {
789 			pr_debug("%s:%*s reading pp disk sector %llu\n",
790 				 __func__, indent, "",
791 				 (unsigned long long)(ppl_sector + i));
792 			if (!sync_page_io(log->rdev,
793 					ppl_sector - log->rdev->data_offset + i,
794 					block_size, page2, REQ_OP_READ, 0,
795 					false)) {
796 				pr_debug("%s:%*s read failed!\n", __func__,
797 					 indent, "");
798 				md_error(mddev, log->rdev);
799 				ret = -EIO;
800 				goto out;
801 			}
802 
803 			ppl_xor(block_size, page1, page2);
804 		}
805 
806 		/* map raid sector to parity disk */
807 		parity_sector = raid5_compute_sector(conf, r_sector_first + i,
808 				0, &disk, &sh);
809 		BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
810 		parity_rdev = conf->disks[sh.pd_idx].rdev;
811 
812 		BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
813 		pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
814 			 __func__, indent, "",
815 			 (unsigned long long)parity_sector,
816 			 bdevname(parity_rdev->bdev, b));
817 		if (!sync_page_io(parity_rdev, parity_sector, block_size,
818 				page1, REQ_OP_WRITE, 0, false)) {
819 			pr_debug("%s:%*s parity write error!\n", __func__,
820 				 indent, "");
821 			md_error(mddev, parity_rdev);
822 			ret = -EIO;
823 			goto out;
824 		}
825 	}
826 out:
827 	if (page1)
828 		__free_page(page1);
829 	if (page2)
830 		__free_page(page2);
831 	return ret;
832 }
833 
834 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
835 		       sector_t offset)
836 {
837 	struct ppl_conf *ppl_conf = log->ppl_conf;
838 	struct md_rdev *rdev = log->rdev;
839 	struct mddev *mddev = rdev->mddev;
840 	sector_t ppl_sector = rdev->ppl.sector + offset +
841 			      (PPL_HEADER_SIZE >> 9);
842 	struct page *page;
843 	int i;
844 	int ret = 0;
845 
846 	page = alloc_page(GFP_KERNEL);
847 	if (!page)
848 		return -ENOMEM;
849 
850 	/* iterate through all PPL entries saved */
851 	for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
852 		struct ppl_header_entry *e = &pplhdr->entries[i];
853 		u32 pp_size = le32_to_cpu(e->pp_size);
854 		sector_t sector = ppl_sector;
855 		int ppl_entry_sectors = pp_size >> 9;
856 		u32 crc, crc_stored;
857 
858 		pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
859 			 __func__, rdev->raid_disk, i,
860 			 (unsigned long long)ppl_sector, pp_size);
861 
862 		crc = ~0;
863 		crc_stored = le32_to_cpu(e->checksum);
864 
865 		/* read partial parity for this entry and calculate its checksum */
866 		while (pp_size) {
867 			int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
868 
869 			if (!sync_page_io(rdev, sector - rdev->data_offset,
870 					s, page, REQ_OP_READ, 0, false)) {
871 				md_error(mddev, rdev);
872 				ret = -EIO;
873 				goto out;
874 			}
875 
876 			crc = crc32c_le(crc, page_address(page), s);
877 
878 			pp_size -= s;
879 			sector += s >> 9;
880 		}
881 
882 		crc = ~crc;
883 
884 		if (crc != crc_stored) {
885 			/*
886 			 * Don't recover this entry if the checksum does not
887 			 * match, but keep going and try to recover other
888 			 * entries.
889 			 */
890 			pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
891 				 __func__, crc_stored, crc);
892 			ppl_conf->mismatch_count++;
893 		} else {
894 			ret = ppl_recover_entry(log, e, ppl_sector);
895 			if (ret)
896 				goto out;
897 			ppl_conf->recovered_entries++;
898 		}
899 
900 		ppl_sector += ppl_entry_sectors;
901 	}
902 
903 	/* flush the disk cache after recovery if necessary */
904 	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
905 out:
906 	__free_page(page);
907 	return ret;
908 }
909 
910 static int ppl_write_empty_header(struct ppl_log *log)
911 {
912 	struct page *page;
913 	struct ppl_header *pplhdr;
914 	struct md_rdev *rdev = log->rdev;
915 	int ret = 0;
916 
917 	pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
918 		 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
919 
920 	page = alloc_page(GFP_NOIO | __GFP_ZERO);
921 	if (!page)
922 		return -ENOMEM;
923 
924 	pplhdr = page_address(page);
925 	/* zero out PPL space to avoid collision with old PPLs */
926 	blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
927 			    log->rdev->ppl.size, GFP_NOIO, 0);
928 	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
929 	pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
930 	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
931 
932 	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
933 			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
934 			  REQ_FUA, 0, false)) {
935 		md_error(rdev->mddev, rdev);
936 		ret = -EIO;
937 	}
938 
939 	__free_page(page);
940 	return ret;
941 }
942 
943 static int ppl_load_distributed(struct ppl_log *log)
944 {
945 	struct ppl_conf *ppl_conf = log->ppl_conf;
946 	struct md_rdev *rdev = log->rdev;
947 	struct mddev *mddev = rdev->mddev;
948 	struct page *page, *page2, *tmp;
949 	struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
950 	u32 crc, crc_stored;
951 	u32 signature;
952 	int ret = 0, i;
953 	sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
954 
955 	pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
956 	/* read PPL headers, find the most recent one */
957 	page = alloc_page(GFP_KERNEL);
958 	if (!page)
959 		return -ENOMEM;
960 
961 	page2 = alloc_page(GFP_KERNEL);
962 	if (!page2) {
963 		__free_page(page);
964 		return -ENOMEM;
965 	}
966 
967 	/* search the PPL area for the latest PPL */
968 	while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
969 		if (!sync_page_io(rdev,
970 				  rdev->ppl.sector - rdev->data_offset +
971 				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
972 				  0, false)) {
973 			md_error(mddev, rdev);
974 			ret = -EIO;
975 			/* if not able to read - don't recover any PPL */
976 			pplhdr = NULL;
977 			break;
978 		}
979 		pplhdr = page_address(page);
980 
981 		/* check header validity */
982 		crc_stored = le32_to_cpu(pplhdr->checksum);
983 		pplhdr->checksum = 0;
984 		crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
985 
986 		if (crc_stored != crc) {
987 			pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
988 				 __func__, crc_stored, crc,
989 				 (unsigned long long)pplhdr_offset);
990 			pplhdr = prev_pplhdr;
991 			pplhdr_offset = prev_pplhdr_offset;
992 			break;
993 		}
994 
995 		signature = le32_to_cpu(pplhdr->signature);
996 
997 		if (mddev->external) {
998 			/*
999 			 * For external metadata the header signature is set and
1000 			 * validated in userspace.
1001 			 */
1002 			ppl_conf->signature = signature;
1003 		} else if (ppl_conf->signature != signature) {
1004 			pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
1005 				 __func__, signature, ppl_conf->signature,
1006 				 (unsigned long long)pplhdr_offset);
1007 			pplhdr = prev_pplhdr;
1008 			pplhdr_offset = prev_pplhdr_offset;
1009 			break;
1010 		}
1011 
1012 		if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
1013 		    le64_to_cpu(pplhdr->generation)) {
1014 			/* previous was newest */
1015 			pplhdr = prev_pplhdr;
1016 			pplhdr_offset = prev_pplhdr_offset;
1017 			break;
1018 		}
1019 
1020 		prev_pplhdr_offset = pplhdr_offset;
1021 		prev_pplhdr = pplhdr;
1022 
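		/* swap the pages so the next read doesn't overwrite the saved header */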
1023 		tmp = page;
1024 		page = page2;
1025 		page2 = tmp;
1026 
1027 		/* calculate next potential ppl offset */
1028 		for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
1029 			pplhdr_offset +=
1030 			    le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
1031 		pplhdr_offset += PPL_HEADER_SIZE >> 9;
1032 	}
1033 
1034 	/* no valid ppl found */
1035 	if (!pplhdr)
1036 		ppl_conf->mismatch_count++;
1037 	else
1038 		pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
1039 		    __func__, (unsigned long long)pplhdr_offset,
1040 		    le64_to_cpu(pplhdr->generation));
1041 
1042 	/* attempt to recover from log if we are starting a dirty array */
1043 	if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
1044 		ret = ppl_recover(log, pplhdr, pplhdr_offset);
1045 
1046 	/* write empty header if we are starting the array */
1047 	if (!ret && !mddev->pers)
1048 		ret = ppl_write_empty_header(log);
1049 
1050 	__free_page(page);
1051 	__free_page(page2);
1052 
1053 	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1054 		 __func__, ret, ppl_conf->mismatch_count,
1055 		 ppl_conf->recovered_entries);
1056 	return ret;
1057 }
1058 
1059 static int ppl_load(struct ppl_conf *ppl_conf)
1060 {
1061 	int ret = 0;
1062 	u32 signature = 0;
1063 	bool signature_set = false;
1064 	int i;
1065 
1066 	for (i = 0; i < ppl_conf->count; i++) {
1067 		struct ppl_log *log = &ppl_conf->child_logs[i];
1068 
1069 		/* skip missing drive */
1070 		if (!log->rdev)
1071 			continue;
1072 
1073 		ret = ppl_load_distributed(log);
1074 		if (ret)
1075 			break;
1076 
1077 		/*
1078 		 * For external metadata we can't check if the signature is
1079 		 * correct on a single drive, but we can check if it is the same
1080 		 * on all drives.
1081 		 */
1082 		if (ppl_conf->mddev->external) {
1083 			if (!signature_set) {
1084 				signature = ppl_conf->signature;
1085 				signature_set = true;
1086 			} else if (signature != ppl_conf->signature) {
1087 				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
1088 					mdname(ppl_conf->mddev));
1089 				ret = -EINVAL;
1090 				break;
1091 			}
1092 		}
1093 	}
1094 
1095 	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1096 		 __func__, ret, ppl_conf->mismatch_count,
1097 		 ppl_conf->recovered_entries);
1098 	return ret;
1099 }
1100 
1101 static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1102 {
1103 	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1104 	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1105 
1106 	kfree(ppl_conf->child_logs);
1107 
1108 	if (ppl_conf->bs)
1109 		bioset_free(ppl_conf->bs);
1110 	mempool_destroy(ppl_conf->io_pool);
1111 	kmem_cache_destroy(ppl_conf->io_kc);
1112 
1113 	kfree(ppl_conf);
1114 }
1115 
1116 void ppl_exit_log(struct r5conf *conf)
1117 {
1118 	struct ppl_conf *ppl_conf = conf->log_private;
1119 
1120 	if (ppl_conf) {
1121 		__ppl_exit_log(ppl_conf);
1122 		conf->log_private = NULL;
1123 	}
1124 }
1125 
1126 static int ppl_validate_rdev(struct md_rdev *rdev)
1127 {
1128 	char b[BDEVNAME_SIZE];
1129 	int ppl_data_sectors;
1130 	int ppl_size_new;
1131 
1132 	/*
1133 	 * The configured PPL size must be enough to store
1134 	 * the header and (at the very least) partial parity
1135 	 * for one stripe. Round it down to ensure the data
1136 	 * space is cleanly divisible by stripe size.
1137 	 */
1138 	ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
1139 
1140 	if (ppl_data_sectors > 0)
1141 		ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);
1142 
1143 	if (ppl_data_sectors <= 0) {
1144 		pr_warn("md/raid:%s: PPL space too small on %s\n",
1145 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1146 		return -ENOSPC;
1147 	}
1148 
1149 	ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
1150 
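	/* make sure the PPL area does not overlap the data area */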
1151 	if ((rdev->ppl.sector < rdev->data_offset &&
1152 	     rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
1153 	    (rdev->ppl.sector >= rdev->data_offset &&
1154 	     rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
1155 		pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
1156 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1157 		return -EINVAL;
1158 	}
1159 
1160 	if (!rdev->mddev->external &&
1161 	    ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
1162 	     (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
1163 		pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
1164 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1165 		return -EINVAL;
1166 	}
1167 
1168 	rdev->ppl.size = ppl_size_new;
1169 
1170 	return 0;
1171 }
1172 
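/*
 * Decide whether multiple PPLs can be written to the PPL area: this requires
 * room for at least two io_units of PPL_SPACE_SIZE plus their headers.
 * Otherwise every PPL is written at the start of the area.
 */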
1173 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1174 {
1175 	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
1176 				      PPL_HEADER_SIZE) * 2) {
1177 		log->use_multippl = true;
1178 		set_bit(MD_HAS_MULTIPLE_PPLS,
1179 			&log->ppl_conf->mddev->flags);
1180 		log->entry_space = PPL_SPACE_SIZE;
1181 	} else {
1182 		log->use_multippl = false;
1183 		log->entry_space = (log->rdev->ppl.size << 9) -
1184 				   PPL_HEADER_SIZE;
1185 	}
1186 	log->next_io_sector = rdev->ppl.sector;
1187 }
1188 
1189 int ppl_init_log(struct r5conf *conf)
1190 {
1191 	struct ppl_conf *ppl_conf;
1192 	struct mddev *mddev = conf->mddev;
1193 	int ret = 0;
1194 	int i;
1195 	bool need_cache_flush = false;
1196 
1197 	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
1198 		 mdname(conf->mddev));
1199 
1200 	if (PAGE_SIZE != 4096)
1201 		return -EINVAL;
1202 
1203 	if (mddev->level != 5) {
1204 		pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
1205 			mdname(mddev), mddev->level);
1206 		return -EINVAL;
1207 	}
1208 
1209 	if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
1210 		pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
1211 			mdname(mddev));
1212 		return -EINVAL;
1213 	}
1214 
1215 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1216 		pr_warn("md/raid:%s PPL is not compatible with journal\n",
1217 			mdname(mddev));
1218 		return -EINVAL;
1219 	}
1220 
1221 	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1222 	if (!ppl_conf)
1223 		return -ENOMEM;
1224 
1225 	ppl_conf->mddev = mddev;
1226 
1227 	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1228 	if (!ppl_conf->io_kc) {
1229 		ret = -ENOMEM;
1230 		goto err;
1231 	}
1232 
1233 	ppl_conf->io_pool = mempool_create(conf->raid_disks, ppl_io_pool_alloc,
1234 					   ppl_io_pool_free, ppl_conf->io_kc);
1235 	if (!ppl_conf->io_pool) {
1236 		ret = -ENOMEM;
1237 		goto err;
1238 	}
1239 
1240 	ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
1241 	if (!ppl_conf->bs) {
1242 		ret = -ENOMEM;
1243 		goto err;
1244 	}
1245 
1246 	ppl_conf->count = conf->raid_disks;
1247 	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1248 				       GFP_KERNEL);
1249 	if (!ppl_conf->child_logs) {
1250 		ret = -ENOMEM;
1251 		goto err;
1252 	}
1253 
1254 	atomic64_set(&ppl_conf->seq, 0);
1255 	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1256 	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1257 
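	/*
	 * For internal metadata the signature is derived from the array uuid;
	 * for external metadata it is set by userspace and read back from the
	 * PPL headers in ppl_load_distributed().
	 */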
1258 	if (!mddev->external) {
1259 		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1260 		ppl_conf->block_size = 512;
1261 	} else {
1262 		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
1263 	}
1264 
1265 	for (i = 0; i < ppl_conf->count; i++) {
1266 		struct ppl_log *log = &ppl_conf->child_logs[i];
1267 		struct md_rdev *rdev = conf->disks[i].rdev;
1268 
1269 		mutex_init(&log->io_mutex);
1270 		spin_lock_init(&log->io_list_lock);
1271 		INIT_LIST_HEAD(&log->io_list);
1272 
1273 		log->ppl_conf = ppl_conf;
1274 		log->rdev = rdev;
1275 
1276 		if (rdev) {
1277 			struct request_queue *q;
1278 
1279 			ret = ppl_validate_rdev(rdev);
1280 			if (ret)
1281 				goto err;
1282 
1283 			q = bdev_get_queue(rdev->bdev);
1284 			if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
1285 				need_cache_flush = true;
1286 			ppl_init_child_log(log, rdev);
1287 		}
1288 	}
1289 
1290 	if (need_cache_flush)
1291 		pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
1292 			mdname(mddev));
1293 
1294 	/* load and possibly recover the logs from the member disks */
1295 	ret = ppl_load(ppl_conf);
1296 
1297 	if (ret) {
1298 		goto err;
1299 	} else if (!mddev->pers &&
1300 		   mddev->recovery_cp == 0 && !mddev->degraded &&
1301 		   ppl_conf->recovered_entries > 0 &&
1302 		   ppl_conf->mismatch_count == 0) {
1303 		/*
1304 		 * If we are starting a dirty array and the recovery succeeds
1305 		 * without any issues, set the array as clean.
1306 		 */
1307 		mddev->recovery_cp = MaxSector;
1308 		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
1309 	} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
1310 		/* no mismatch allowed when enabling PPL for a running array */
1311 		ret = -EINVAL;
1312 		goto err;
1313 	}
1314 
1315 	conf->log_private = ppl_conf;
1316 	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1317 
1318 	return 0;
1319 err:
1320 	__ppl_exit_log(ppl_conf);
1321 	return ret;
1322 }
1323 
1324 int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1325 {
1326 	struct ppl_conf *ppl_conf = conf->log_private;
1327 	struct ppl_log *log;
1328 	int ret = 0;
1329 	char b[BDEVNAME_SIZE];
1330 
1331 	if (!rdev)
1332 		return -EINVAL;
1333 
1334 	pr_debug("%s: disk: %d operation: %s dev: %s\n",
1335 		 __func__, rdev->raid_disk, add ? "add" : "remove",
1336 		 bdevname(rdev->bdev, b));
1337 
1338 	if (rdev->raid_disk < 0)
1339 		return 0;
1340 
1341 	if (rdev->raid_disk >= ppl_conf->count)
1342 		return -ENODEV;
1343 
1344 	log = &ppl_conf->child_logs[rdev->raid_disk];
1345 
1346 	mutex_lock(&log->io_mutex);
1347 	if (add) {
1348 		ret = ppl_validate_rdev(rdev);
1349 		if (!ret) {
1350 			log->rdev = rdev;
1351 			ret = ppl_write_empty_header(log);
1352 			ppl_init_child_log(log, rdev);
1353 		}
1354 	} else {
1355 		log->rdev = NULL;
1356 	}
1357 	mutex_unlock(&log->io_mutex);
1358 
1359 	return ret;
1360 }
1361