/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
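
	/*
	 * The AAD is longer than one block, so hash it with the GCA
	 * coprocessor operation. Large AADs are processed in multiple
	 * passes: each pass feeds the previous partial result back in
	 * through in_pat and sets the CONTINUATION flag.
	 */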

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
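		/*
		 * GMAC processes only the AAD: bit_length_data stays zero
		 * and the running tag state (out_pat_or_mac, S0) is carried
		 * across passes via the CONTINUATION flag.
		 */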
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
				   * sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy the resulting auth tag into the CPB for the caller to pick up */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
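	 * Clearing it also scrubs the copy of the key that was made for the
	 * single ECB pass above.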
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}

static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

/* Tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses the tfm's blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority = 300,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct nx_crypto_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = nx_crypto_ctx_aes_gcm_init,
	.exit = nx_crypto_ctx_aead_exit,
	.ivsize = 12,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey = gcm_aes_nx_set_key,
	.encrypt = gcm_aes_nx_encrypt,
	.decrypt = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority = 300,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct nx_crypto_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = nx_crypto_ctx_aes_gcm_init,
	.exit = nx_crypto_ctx_aead_exit,
	.ivsize = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt = gcm4106_aes_nx_encrypt,
	.decrypt = gcm4106_aes_nx_decrypt,
};