// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT SZ_32M

struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

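/*
 * Hardware cipher configuration for each supported algorithm/mode/key-size
 * combination, indexed by enum sec_cipher_alg.  These values are copied
 * into the BD template when a key is set.
 */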
static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to ensure safe operation of the reference count of
 * algorithm providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

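/*
 * Fill in the parts of the BD (buffer descriptor) template that depend only
 * on the selected algorithm: cipher algorithm, mode, key length, width and
 * the DMA address of the key material.
 */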
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}

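/* Walk a chained hardware SGL and return each chunk to the DMA pool. */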
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

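/*
 * Build a chained hardware SGL from a DMA-mapped scatterlist.  Each chunk
 * holds up to SEC_MAX_SGE_NUM entries; further chunks are allocated from the
 * DMA pool and linked via next/next_sgl as needed.
 */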
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   GFP_KERNEL, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}

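/*
 * Common setkey handler: allocate a coherent DMA buffer for the key on the
 * first call (reused and cleared on rekeying) and initialise the request
 * template for the selected algorithm.
 */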
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

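/*
 * Per-mode setkey helpers: map the supplied key length to the matching
 * hardware algorithm configuration before calling the common handler.
 */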
static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

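/*
 * Free one request element: its hardware SGLs, the split scatterlists and
 * the element itself.
 */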
static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to the hardware queue only under the following
		 * circumstances:
		 * 1) Software and hardware queues are empty so there are no
		 *    chain dependencies.
		 * 2) No dependencies as a new IV is used - (check the software
		 *    queue is empty to maintain ordering).
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue, which
		 * is then emptied as requests complete.
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send, then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

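/*
 * Per-element completion callback.  Checks the response BD for errors,
 * updates the IV for chained modes, feeds the next element from the
 * software queue or backlog into the hardware, and completes the skcipher
 * request once all of its elements have finished.
 */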
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester,
		 * which should be able to handle it appropriately.
		 */
	}

	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
		    backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The lock/unlock dance is needed as completing the request may free
	 * the memory holding the lock.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

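/*
 * Work out how many SEC_REQ_LIMIT sized chunks are needed for the request
 * and allocate the array of per-chunk sizes used to split the scatterlists.
 */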
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

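/*
 * DMA map the scatterlist and split it into per-chunk scatterlists matching
 * the sizes calculated above.
 */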
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* Split the mapped scatterlist into steps chunks of split_sizes bytes */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}


/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

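/*
 * Allocate and fill a single request element: copy the BD template, set the
 * cipher direction and granule size, and attach hardware SGLs for the input
 * (and, when the destination differs, the output) scatterlists.
 */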
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), GFP_KERNEL);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

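/*
 * Main request path: split the request into SEC_REQ_LIMIT sized elements,
 * DMA map everything, build a BD per element and then queue the whole set
 * atomically to the hardware (or to the software/backlog queues).
 */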
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization: in the chaining case we can't use a DMA pool
	 * bounce buffer, but in the case where we know there is no chaining
	 * we can.
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog.  If there is a backlog we
	 * have to only queue to the backlog list and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) <= steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);
	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_TO_DEVICE);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

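/*
 * Variant used by the chained modes (CBC, CTR): also allocate a software
 * queue so element ordering can be preserved when the hardware queue is busy.
 */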
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

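/*
 * skcipher algorithms exposed to the crypto API.  Modes that chain between
 * elements use the *_with_queue init/exit callbacks above.
 */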
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
	/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

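/*
 * Register the algorithms when the first device comes up and unregister
 * them when the last one goes away; active_devs is protected by algs_lock.
 */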
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;
	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}