// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-cipher.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"

static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	struct scatterlist *sg;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
		return true;

	sg = areq->src;
	while (sg) {
		if ((sg->length % 16) != 0)
			return true;
		if ((sg_dma_len(sg) % 16) != 0)
			return true;
		if (!IS_ALIGNED(sg->offset, 16))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if ((sg->length % 16) != 0)
			return true;
		if ((sg_dma_len(sg) % 16) != 0)
			return true;
		if (!IS_ALIGNED(sg->offset, 16))
			return true;
		sg = sg_next(sg);
	}

	/* The SS needs the same number of SGs (with the same lengths) for
	 * source and destination.
	 */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length)
			return true;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;
	return false;
}

static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}
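
/*
 * Illustrative sketch only (not part of the driver): how a kernel caller
 * would exercise this offloader through the generic skcipher API. The
 * function name sun8i_ss_usage_sketch() is hypothetical; the crypto_*,
 * skcipher_request_* and sg_init_one() calls are the real kernel API.
 * Compiled out by default.
 */
#ifdef SUN8I_SS_USAGE_SKETCH
static int sun8i_ss_usage_sketch(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[16] = {};
	u8 key[16] = {};
	u8 *buf;
	int err;

	/* The crypto core selects this driver when its priority wins. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(16, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	/* One 16-byte block, in place: src == dst takes the
	 * DMA_BIDIRECTIONAL path in sun8i_ss_cipher().
	 */
	sg_init_one(&sg, buf, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);

	/* crypto_wait_req() turns the asynchronous completion into a
	 * synchronous wait.
	 */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
#endif /* SUN8I_SS_USAGE_SKETCH */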
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	void *backup_iv = NULL;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int i;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);

	dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode];
	rctx->method = ss->variant->alg_cipher[algt->ss_algo_id];
	rctx->keylen = op->keylen;

	rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, rctx->p_key)) {
		dev_err(ss->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
		rctx->ivlen = ivsize;
		rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!rctx->biv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & SS_DECRYPTION) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				/* Free biv here: theend_key skips the IV
				 * cleanup done under theend_iv.
				 */
				kfree(rctx->biv);
				rctx->biv = NULL;
				goto theend_key;
			}
			/* Save the last ciphertext block: it becomes the
			 * next IV and the destination may overwrite it.
			 */
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
		memcpy(rctx->biv, areq->iv, ivsize);
		rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(ss->dev, rctx->p_iv)) {
			dev_err(ss->dev, "Cannot DMA MAP IV\n");
			/* Reset so theend_iv does not unmap a failed mapping. */
			rctx->p_iv = 0;
			err = -ENOMEM;
			goto theend_iv;
		}
	}
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		/* The task descriptor stores lengths in 32-bit words. */
		rctx->t_src[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
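
	/*
	 * The labels below unwind in reverse order of setup and fall
	 * through into one another on both success and error: SG
	 * unmapping first, then IV unmapping and copy-back, then key
	 * unmapping.
	 */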
theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (rctx->p_iv)
		dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
				 DMA_TO_DEVICE);

	if (areq->iv && ivsize > 0) {
		if (rctx->biv) {
			offset = areq->cryptlen - ivsize;
			if (rctx->op_dir & SS_DECRYPTION) {
				memcpy(areq->iv, backup_iv, ivsize);
				kfree_sensitive(backup_iv);
			} else {
				scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
							 ivsize, 0);
			}
			kfree(rctx->biv);
		}
	}

theend_key:
	dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sun8i_ss_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}

int sun8i_ss_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_DECRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ss_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_ENCRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	/* The request context ends with the fallback request, so reserve
	 * room for both.
	 */
	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ss->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0) {
		dev_err(op->ss->dev, "pm error %d\n", err);
		goto error_pm;
	}

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync(op->ss->dev);
}
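
/*
 * Both setkey helpers below keep their own copy of the key: kmemdup()
 * with GFP_DMA because sun8i_ss_cipher() maps it with dma_map_single()
 * on every request, and kfree_sensitive() on the old copy so that key
 * material does not linger in freed memory.
 */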
int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
		return -EINVAL;
	}

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
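
/*
 * Illustrative sketch only: the entry points above are wired up by the
 * companion core file (sun8i-ss-core.c) through an algorithm template
 * roughly like the one below. Field values here (names, priority,
 * sizes) are examples for one CBC-AES instance, not the authoritative
 * registration. Compiled out by default.
 */
#ifdef SUN8I_SS_REGISTRATION_SKETCH
static struct sun8i_ss_alg_template ss_cbc_aes_example = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ss_algo_id = SS_ID_CIPHER_AES,
	.ss_blockmode = SS_ID_OP_CBC,
	.alg.skcipher = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-sun8i-ss",
			.cra_priority = 400,
			.cra_blocksize = 16,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = sun8i_ss_cipher_init,
			.cra_exit = sun8i_ss_cipher_exit,
		},
		.min_keysize	= 16,
		.max_keysize	= 32,
		.ivsize		= 16,
		.setkey		= sun8i_ss_aes_setkey,
		.encrypt	= sun8i_ss_skencrypt,
		.decrypt	= sun8i_ss_skdecrypt,
	},
};
#endif /* SUN8I_SS_REGISTRATION_SKETCH */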