// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	if (ret)
		return ret;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
		memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}

static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
	     (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
	    (req->cryptlen & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
		if (!req->iv)
			return -EINVAL;

		memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
		iv_sg = &rctx->iv_sg;
		iv_len = AES_BLOCK_SIZE;
		sg_init_one(iv_sg, rctx->iv, iv_len);
	}

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action =
		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->cryptlen;
	rctx->cmd.u.aes.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}
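/*
 * For reference: a kernel consumer reaches ccp_aes_crypt() through the
 * generic skcipher API, roughly as sketched below.  This is a minimal
 * synchronous example, not part of the driver itself; the buffer names
 * (key, iv, data, len) are placeholders, error handling is omitted, and
 * for ECB/CBC len must be a multiple of AES_BLOCK_SIZE:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	sg_init_one(&sg, data, len);
 *	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */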
static int ccp_aes_encrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, true);
}

static int ccp_aes_decrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, false);
}

static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->complete = ccp_aes_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
				    int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	/* Restore the original pointer */
	req->iv = rctx->rfc3686_info;

	return ccp_aes_complete(async_req, ret);
}

static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	key_len -= CTR_RFC3686_NONCE_SIZE;
	memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

	return ccp_aes_setkey(tfm, key, key_len);
}

static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	u8 *iv;

	/* Initialize the CTR block */
	iv = rctx->rfc3686_iv;
	memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);

	iv += CTR_RFC3686_IV_SIZE;
	*(__be32 *)iv = cpu_to_be32(1);

	/* Point to the new IV */
	rctx->rfc3686_info = req->iv;
	req->iv = rctx->rfc3686_iv;

	return ccp_aes_crypt(req, encrypt);
}

static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, true);
}

static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, false);
}

static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->complete = ccp_aes_rfc3686_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

static const struct skcipher_alg ccp_aes_defaults = {
	.setkey			= ccp_aes_setkey,
	.encrypt		= ccp_aes_encrypt,
	.decrypt		= ccp_aes_decrypt,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.init			= ccp_aes_init_tfm,

	.base.cra_flags		= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx),
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};
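/*
 * The rfc3686 variant below wraps AES-CTR per RFC 3686: the transform
 * key carries a 4-byte nonce after the AES key proper (hence the larger
 * min/max key sizes), and each request builds a 16-byte counter block of
 * nonce (4 bytes) || caller IV (8 bytes) || big-endian counter primed
 * to 1 (4 bytes), as assembled in ccp_aes_rfc3686_crypt() above.
 */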
static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
	.setkey			= ccp_aes_rfc3686_setkey,
	.encrypt		= ccp_aes_rfc3686_encrypt,
	.decrypt		= ccp_aes_rfc3686_decrypt,
	.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.init			= ccp_aes_rfc3686_init_tfm,

	.base.cra_flags		= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= CTR_RFC3686_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx),
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};

struct ccp_aes_def {
	enum ccp_aes_mode mode;
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int blocksize;
	unsigned int ivsize;
	const struct skcipher_alg *alg_defaults;
};

static struct ccp_aes_def aes_algs[] = {
	{
		.mode		= CCP_AES_MODE_ECB,
		.version	= CCP_VERSION(3, 0),
		.name		= "ecb(aes)",
		.driver_name	= "ecb-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CBC,
		.version	= CCP_VERSION(3, 0),
		.name		= "cbc(aes)",
		.driver_name	= "cbc-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CFB,
		.version	= CCP_VERSION(3, 0),
		.name		= "cfb(aes)",
		.driver_name	= "cfb-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_OFB,
		.version	= CCP_VERSION(3, 0),
		.name		= "ofb(aes)",
		.driver_name	= "ofb-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "ctr(aes)",
		.driver_name	= "ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "rfc3686(ctr(aes))",
		.driver_name	= "rfc3686-ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= CTR_RFC3686_IV_SIZE,
		.alg_defaults	= &ccp_aes_rfc3686_defaults,
	},
};
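/*
 * Registration is table driven: each aes_algs[] entry above names one
 * mode, the minimum CCP hardware version that implements it, and the
 * skcipher defaults to clone.  ccp_register_aes_algs() walks the table
 * and skips any entry that requires a newer device than was detected.
 */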
static int ccp_register_aes_alg(struct list_head *head,
				const struct ccp_aes_def *def)
{
	struct ccp_crypto_skcipher_alg *ccp_alg;
	struct skcipher_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->mode = def->mode;

	/* Copy the defaults and override as necessary */
	alg = &ccp_alg->alg;
	*alg = *def->alg_defaults;
	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->driver_name);
	alg->base.cra_blocksize = def->blocksize;
	alg->ivsize = def->ivsize;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		pr_err("%s skcipher algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_aes_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (aes_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_aes_alg(head, &aes_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
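/*
 * The @head list collects every successfully registered algorithm so
 * the caller can tear them down again.  A minimal teardown sketch (the
 * actual unregistration lives with the caller, e.g. ccp-crypto-main.c):
 *
 *	struct ccp_crypto_skcipher_alg *ccp_alg, *tmp;
 *
 *	list_for_each_entry_safe(ccp_alg, tmp, head, entry) {
 *		crypto_unregister_skcipher(&ccp_alg->alg);
 *		list_del(&ccp_alg->entry);
 *		kfree(ccp_alg);
 *	}
 */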