// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
		memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
	struct ccp_crypto_ablkcipher_alg *alg =
		ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}

static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
	     (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
	    (req->nbytes & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
		if (!req->info)
			return -EINVAL;

		memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
		iv_sg = &rctx->iv_sg;
		iv_len = AES_BLOCK_SIZE;
		sg_init_one(iv_sg, rctx->iv, iv_len);
	}

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action =
		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->nbytes;
	rctx->cmd.u.aes.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_crypt(req, true);
}

static int ccp_aes_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_crypt(req, false);
}

static int ccp_aes_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->complete = ccp_aes_complete;
	ctx->u.aes.key_len = 0;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

	return 0;
}

static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
				    int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	/* Restore the original pointer */
	req->info = rctx->rfc3686_info;

	return ccp_aes_complete(async_req, ret);
}

static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
				  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	key_len -= CTR_RFC3686_NONCE_SIZE;
	memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

	return ccp_aes_setkey(tfm, key, key_len);
}

static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	u8 *iv;

	/* Initialize the CTR block */
	iv = rctx->rfc3686_iv;
	memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);

	iv += CTR_RFC3686_IV_SIZE;
	*(__be32 *)iv = cpu_to_be32(1);

	/* Point to the new IV */
	rctx->rfc3686_info = req->info;
	req->info = rctx->rfc3686_iv;

	return ccp_aes_crypt(req, encrypt);
}

static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, true);
}

static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, false);
}

static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->complete = ccp_aes_rfc3686_complete;
	ctx->u.aes.key_len = 0;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

	return 0;
}

static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg ccp_aes_defaults = {
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
			  CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct ccp_ctx),
	.cra_priority	= CCP_CRA_PRIORITY,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_init	= ccp_aes_cra_init,
	.cra_exit	= ccp_aes_cra_exit,
	.cra_module	= THIS_MODULE,
	.cra_ablkcipher	= {
		.setkey		= ccp_aes_setkey,
		.encrypt	= ccp_aes_encrypt,
		.decrypt	= ccp_aes_decrypt,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
};

static struct crypto_alg ccp_aes_rfc3686_defaults = {
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
			  CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize	= CTR_RFC3686_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct ccp_ctx),
	.cra_priority	= CCP_CRA_PRIORITY,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_init	= ccp_aes_rfc3686_cra_init,
	.cra_exit	= ccp_aes_rfc3686_cra_exit,
	.cra_module	= THIS_MODULE,
	.cra_ablkcipher	= {
		.setkey		= ccp_aes_rfc3686_setkey,
		.encrypt	= ccp_aes_rfc3686_encrypt,
		.decrypt	= ccp_aes_rfc3686_decrypt,
		.min_keysize	= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	},
};

struct ccp_aes_def {
	enum ccp_aes_mode mode;
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int blocksize;
	unsigned int ivsize;
	struct crypto_alg *alg_defaults;
};

static struct ccp_aes_def aes_algs[] = {
	{
		.mode		= CCP_AES_MODE_ECB,
		.version	= CCP_VERSION(3, 0),
		.name		= "ecb(aes)",
		.driver_name	= "ecb-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CBC,
		.version	= CCP_VERSION(3, 0),
		.name		= "cbc(aes)",
		.driver_name	= "cbc-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CFB,
		.version	= CCP_VERSION(3, 0),
		.name		= "cfb(aes)",
		.driver_name	= "cfb-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_OFB,
		.version	= CCP_VERSION(3, 0),
		.name		= "ofb(aes)",
		.driver_name	= "ofb-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "ctr(aes)",
		.driver_name	= "ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "rfc3686(ctr(aes))",
		.driver_name	= "rfc3686-ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= CTR_RFC3686_IV_SIZE,
		.alg_defaults	= &ccp_aes_rfc3686_defaults,
	},
};

static int ccp_register_aes_alg(struct list_head *head,
				const struct ccp_aes_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->mode = def->mode;

	/* Copy the defaults and override as necessary */
	alg = &ccp_alg->alg;
	*alg = *def->alg_defaults;
	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->driver_name);
	alg->cra_blocksize = def->blocksize;
	alg->cra_ablkcipher.ivsize = def->ivsize;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_aes_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (aes_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_aes_alg(head, &aes_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
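
/*
 * Illustrative sketch only, not part of this driver: one way a kernel
 * caller of this API era might exercise the "cbc(aes)" algorithm
 * registered above through the ablkcipher interface.  The function name,
 * placeholder key/IV values, and the kzalloc'd buffer are hypothetical;
 * <linux/slab.h> is assumed for kzalloc()/kfree(), and error handling is
 * abbreviated.  Real callers should use heap (DMA-able) buffers, as the
 * CCP hardware accesses the scatterlists directly.
 */
static int __maybe_unused ccp_aes_example_cbc_encrypt(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[AES_KEYSIZE_128] = { 0 };	/* placeholder key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* placeholder IV */
	u8 *buf;
	int ret;

	/* Request cbc(aes); the CCP provider wins if its priority is highest */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_free_req;
	}

	/* Encrypt one block in place */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					     CRYPTO_TFM_REQ_MAY_SLEEP,
					crypto_req_done, &wait);
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* The CCP is asynchronous; wait for the completion callback */
	ret = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);

	kfree(buf);
out_free_req:
	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}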