// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

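/* maximum number of blocks (data sources plus the P and Q destinations)
 * accepted by these routines; it also bounds the on-stack coefficient arrays
 */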
#define MAX_DISKS 255

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 * @chan: DMA channel to issue the operation on
 * @scfs: coefficients for each mapped source block
 * @disks: number of mapped blocks, including the P and Q destinations
 * @unmap: unmap descriptor holding the DMA addresses of sources and destinations
 * @dma_flags: flags for the device's ->device_prep_dma_pq() operation
 * @submit: submission/completion modifiers
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

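	/* split the operation into as many hardware descriptors as needed:
	 * each pass handles at most dma_maxpq() sources, with follow-on
	 * descriptors continuing the P/Q computation
	 */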
	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* If the driver cannot provide a descriptor, force forward
		 * progress: quiesce any dependency, flush pending operations
		 * and retry
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

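		/* subsequent descriptors continue the computation from the
		 * partial P/Q results already present in the destinations
		 */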
		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 * @blocks: source and destination pages: P at blocks[disks-2], Q at blocks[disks-1]
 * @offsets: offset into each block (src and dest) to start the operation
 * @disks: number of blocks, including the P and Q destinations
 * @len: length of the operation in bytes
 * @submit: submission/completion modifiers
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

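	/* build the flat source array for the raid6 library call, mapping
	 * missing data blocks to the zero page and tracking the first and
	 * last present data block for the xor_syndrome (rmw) case
	 */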
	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be NULL */
			srcs[i] = (void *)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offsets[i];

			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else {
		raid6_call.gen_syndrome(disks, len, srcs);
	}
	async_tx_sync_epilog(submit);
}

static inline bool
is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
		       int src_cnt, size_t len)
{
	int i;

	for (i = 0; i < src_cnt; i++) {
		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
			return false;
	}
	return true;
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL, those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing into the P/Q destinations (ASYNC_TX_PQ_XOR_DST) is only
	 * implemented in software; the hardware path also requires that the
	 * operation fit the engine's source limit (or that it support
	 * continuation) and that all offsets be suitably aligned
	 */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[MAX_DISKS];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
						offsets[i], len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks)) {
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							P(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		} else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks)) {
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							Q(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		} else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

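	/* substitute the preallocated scribble page for a missing P or Q
	 * destination; the synchronous gen_syndrome always writes both
	 */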
	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		P(offsets, disks) = 0;
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		Q(offsets, disks) = 0;
	}
	do_sync_gen_syndrome(blocks, offsets, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @s_off: spare buffer page offset
 * @submit: submission/completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks'
 * and 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   unsigned int s_off, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[MAX_DISKS];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4 || disks > MAX_DISKS);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
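		/* map the data sources, skipping 'empty' (NULL) blocks and
		 * recording the matching coefficient for each mapped source
		 */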
		for (i = 0; i < disks - 2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offsets[i], len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     P(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     Q(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
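		/* as in do_async_gen_syndrome(), retry until the driver can
		 * provide a descriptor, forcing forward progress by quiescing
		 * dependencies and flushing pending operations
		 */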
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		unsigned int p_off = P(offsets, disks);
		struct page *q_src = Q(blocks, disks);
		unsigned int q_off = Q(offsets, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* the caller must provide a temporary result buffer and a
		 * scribble buffer so that the input parameters can be
		 * preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor_offs(spare, s_off,
					blocks, offsets, disks - 2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + p_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

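		/* regenerate Q into the spare page: P is dropped from the
		 * blocks array (omitted per the async_gen_syndrome convention
		 * and restored below), then the result is compared against
		 * the original Q
		 */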
		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			Q(offsets, disks) = s_off;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offsets, disks,
					len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + q_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		P(offsets, disks) = p_off;
		Q(blocks, disks) = q_src;
		Q(offsets, disks) = q_off;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");