/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from the marvell/cesa.c and s5p-sss.c drivers.
 */
#include "rk3288_crypto.h"

/*
 * The hardware cannot hash a zero-length message, so return the
 * precomputed hash of the empty message when one is requested.
 */

static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

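/* Report completion of the hash request currently on the device back to its caller. */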
static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
{
	if (dev->ahash_req->base.complete)
		dev->ahash_req->base.complete(&dev->ahash_req->base, err);
}

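/*
 * Program the hash engine for a new digest operation: flush the hash
 * unit, clear the digest output registers, enable and acknowledge the
 * hash-receive DMA interrupts, select the hash mode and output byte
 * order, configure FIFO byte swapping, and set the total message length.
 */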
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
	int reg_status;

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
	reg_status &= ~RK_CRYPTO_HASH_FLUSH;
	reg_status |= _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
					    RK_CRYPTO_HRDMA_DONE_ENA);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
					  RK_CRYPTO_BYTESWAP_BRFIFO |
					  RK_CRYPTO_BYTESWAP_BTFIFO);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
}

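/*
 * The partial-update entry points (init/update/final/finup/export/import)
 * are simply forwarded to a software fallback tfm; only the one-shot
 * digest() path below drives the hardware.
 */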
static int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

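/*
 * Hash a complete message on the hardware: program the engine, queue
 * the request so the tasklet starts the DMA chain, busy-wait for the
 * hash-done status bit, then copy the digest out of the MMIO registers.
 */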
static int rk_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct rk_crypto_info *dev;
	unsigned long flags;
	int ret;

	if (!req->nbytes)
		return zero_message_process(req);

	dev = tctx->dev;
	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->mode = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->nents = sg_nents(req->src);

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->crypto_tasklet);

	/*
	 * The engine needs some time to process the data after the last
	 * DMA transfer finishes, and that time depends on the length of
	 * the final chunk, so a fixed delay will not do.  Polling in
	 * 10-50 us steps keeps us from hammering the status register
	 * while still reacting quickly once the hash is ready.
	 */
	while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
		usleep_range(10, 50);

	memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
		      crypto_ahash_digestsize(tfm));

	return 0;
}

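/*
 * Point the hash-receive DMA at the current chunk and start it.  The
 * length register counts 32-bit words; the upper halfword of the CTRL
 * write is the Rockchip-style write-enable mask for the START bit.
 */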
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, DIV_ROUND_UP(dev->count, 4));
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
					  (RK_CRYPTO_HASH_START << 16));
}

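/* DMA-map the current source chunk and kick off the transfer. */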
static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
{
	int err;

	err = dev->load_data(dev, dev->sg_src, NULL);
	if (!err)
		crypto_ahash_dma_start(dev);
	return err;
}

static int rk_ahash_start(struct rk_crypto_info *dev)
{
	return rk_ahash_set_data_start(dev);
}

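/*
 * Hash-receive DMA completion handling: unmap the chunk that just
 * finished, then either advance to the next scatterlist entry and
 * restart the DMA, or signal completion once the whole message has
 * been fed to the engine.
 */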
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
	int err = 0;

	dev->unload_data(dev);
	if (dev->left_bytes) {
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
					 __func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
		}
		err = rk_ahash_set_data_start(dev);
	} else {
		dev->complete(dev, 0);
	}

out_rx:
	return err;
}

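/*
 * tfm constructor: bind the context to its device, allocate a bounce
 * page for unaligned data, install the device callbacks, and create
 * the software fallback tfm used by the partial-update operations.
 */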
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_tmp *algt;
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);

	const char *alg_name = crypto_tfm_alg_name(tfm);

	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	tctx->dev = algt->dev;
	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
	if (!tctx->dev->addr_vir) {
		dev_err(tctx->dev->dev, "failed to allocate page for addr_vir\n");
		return -ENOMEM;
	}
	tctx->dev->start = rk_ahash_start;
	tctx->dev->update = rk_ahash_crypto_rx;
	tctx->dev->complete = rk_ahash_crypto_complete;

	/* for fallback */
	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
		free_page((unsigned long)tctx->dev->addr_vir);
		return PTR_ERR(tctx->fallback_tfm);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	return tctx->dev->enable_clk(tctx->dev);
}

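/* tfm destructor: release the fallback tfm, the bounce page and the clocks. */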
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback_tfm);
	free_page((unsigned long)tctx->dev->addr_vir);
	tctx->dev->disable_clk(tctx->dev);
}

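/*
 * Algorithm templates registered with the crypto API.  All three share
 * the same operations; only the digest size, exported state size and
 * hardware hash mode differ.
 */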
struct rk_crypto_tmp rk_ahash_sha1 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA1_DIGEST_SIZE,
			 .statesize = sizeof(struct sha1_state),
			 .base = {
				  .cra_name = "sha1",
				  .cra_driver_name = "rk-sha1",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA1_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

struct rk_crypto_tmp rk_ahash_sha256 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA256_DIGEST_SIZE,
			 .statesize = sizeof(struct sha256_state),
			 .base = {
				  .cra_name = "sha256",
				  .cra_driver_name = "rk-sha256",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA256_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

struct rk_crypto_tmp rk_ahash_md5 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = MD5_DIGEST_SIZE,
			 .statesize = sizeof(struct md5_state),
			 .base = {
				  .cra_name = "md5",
				  .cra_driver_name = "rk-md5",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};