/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from the marvell/cesa.c and s5p-sss.c drivers.
 */
#include "rk3288_crypto.h"

/*
 * The hardware cannot hash a zero-length message, so when the input is
 * empty we return the precomputed digest of the empty message (the
 * *_zero_message_hash constants exported by the crypto headers) instead.
 */

static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

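/* Propagate completion of an asynchronous request back to its caller. */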
static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

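/*
 * Program the hash engine for a new request: flush any stale state,
 * clear and re-enable the receive-DMA interrupts, select the hash mode
 * and output byte order, and load the total message length.
 */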
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	int reg_status;

	/*
	 * Pulse the hash-flush bit; the upper halfword of RK_CRYPTO_CTRL
	 * acts as a write-enable mask for the lower one.
	 */
	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
	reg_status &= (~RK_CRYPTO_HASH_FLUSH);
	reg_status |= _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	/* Clear the output registers so no stale digest can leak through. */
	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

	/* Enable the receive-DMA interrupts and clear any pending status. */
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
					    RK_CRYPTO_HRDMA_DONE_ENA);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
					  RK_CRYPTO_BYTESWAP_BRFIFO |
					  RK_CRYPTO_BYTESWAP_BTFIFO);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
}

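/*
 * The engine only produces one-shot digests, so the incremental
 * init/update/final/finup/export/import operations below are all
 * delegated to the software fallback transform allocated in
 * rk_cra_hash_init().
 */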
static int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

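/*
 * One-shot digest entry point: empty messages are answered from the
 * precomputed table, everything else is queued for the hardware.
 */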
static int rk_ahash_digest(struct ahash_request *req)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = tctx->dev;

	if (!req->nbytes)
		return zero_message_process(req);

	return dev->enqueue(dev, &req->base);
}

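/*
 * Illustrative consumer sketch (not part of this driver): a kernel user
 * reaches this engine through the generic ahash API.  This assumes the
 * crypto_wait_req() helpers are available; error handling is trimmed.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */
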
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
	/* The DMA length register counts 32-bit words, so round up. */
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
					  (RK_CRYPTO_HASH_START << 16));
}

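/* Map the current scatterlist chunk for DMA and start hashing it. */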
static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
{
	int err;

	err = dev->load_data(dev, dev->sg_src, NULL);
	if (!err)
		crypto_ahash_dma_start(dev);
	return err;
}

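/*
 * Dispatcher hook that begins a queued request: record the scatterlist
 * walk state, map the digest size to a hardware mode, then program the
 * engine and feed it the first chunk.
 */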
static int rk_ahash_start(struct rk_crypto_info *dev)
{
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct crypto_ahash *tfm;
	struct rk_ahash_rctx *rctx;

	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	rctx = ahash_request_ctx(req);
	rctx->mode = 0;

	tfm = crypto_ahash_reqtfm(req);
	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);
	return rk_ahash_set_data_start(dev);
}

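/*
 * Receive-DMA completion hook: unload the chunk that just finished,
 * feed the next scatterlist entry if data remains, otherwise wait for
 * the final digest and complete the request.
 */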
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct crypto_ahash *tfm;

	dev->unload_data(dev);
	if (dev->left_bytes) {
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
					 __func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
		}
		err = rk_ahash_set_data_start(dev);
	} else {
		/*
		 * The engine needs some time to process the data after the
		 * last DMA transfer completes, and that time depends on the
		 * length of the final chunk, so a fixed delay cannot be
		 * used.  Polling in 10us steps avoids hammering the status
		 * register while still reacting quickly once the digest is
		 * ready.
		 */
		while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
			udelay(10);

		tfm = crypto_ahash_reqtfm(req);
		memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
			      crypto_ahash_digestsize(tfm));
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}

out_rx:
	return err;
}

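/*
 * Per-transform setup: bind the transform to its device, allocate the
 * bounce page used for unaligned data, install the dispatcher hooks and
 * allocate the software fallback.
 */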
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_tmp *algt;
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);

	const char *alg_name = crypto_tfm_alg_name(tfm);

	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	tctx->dev = algt->dev;
	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
	if (!tctx->dev->addr_vir) {
		dev_err(tctx->dev->dev, "failed to allocate page for addr_vir\n");
		return -ENOMEM;
	}
	tctx->dev->start = rk_ahash_start;
	tctx->dev->update = rk_ahash_crypto_rx;
	tctx->dev->complete = rk_ahash_crypto_complete;

	/* Allocate the software fallback used by the non-digest entry points. */
	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
		free_page((unsigned long)tctx->dev->addr_vir);
		return PTR_ERR(tctx->fallback_tfm);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	return tctx->dev->enable_clk(tctx->dev);
}

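/* Per-transform teardown: release the fallback, the bounce page and the clocks. */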
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback_tfm);
	free_page((unsigned long)tctx->dev->addr_vir);
	tctx->dev->disable_clk(tctx->dev);
}

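/*
 * Algorithm templates picked up by the core driver at probe time.
 * CRYPTO_ALG_NEED_FALLBACK is required here, since the incremental
 * entry points rely on the software implementation.
 */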
struct rk_crypto_tmp rk_ahash_sha1 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA1_DIGEST_SIZE,
			 .statesize = sizeof(struct sha1_state),
			 .base = {
				  .cra_name = "sha1",
				  .cra_driver_name = "rk-sha1",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA1_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

struct rk_crypto_tmp rk_ahash_sha256 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA256_DIGEST_SIZE,
			 .statesize = sizeof(struct sha256_state),
			 .base = {
				  .cra_name = "sha256",
				  .cra_driver_name = "rk-sha256",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA256_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

struct rk_crypto_tmp rk_ahash_md5 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = MD5_DIGEST_SIZE,
			 .statesize = sizeof(struct md5_state),
			 .base = {
				  .cra_name = "md5",
				  .cra_driver_name = "rk-md5",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};