/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out,
		  unsigned int           assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}
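	/*
	 * AAD of up to one AES block was copied straight into the caller's
	 * in_pat_or_aad buffer above and is consumed by the GCM operation
	 * itself. Larger AAD is hashed here with the GCA function code,
	 * chunked to fit the coprocessor's sg and byte limits, with the
	 * partial result chained through in_pat/out_pat between h-calls.
	 */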
	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

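/*
 * gmac() handles the corner case of associated data with no payload
 * (req->cryptlen == 0, assoclen != 0): the coprocessor is switched to
 * GMAC mode and run over the AAD alone, with bit_length_data forced to
 * zero for every chunk; the tag ends up in out_pat_or_mac.
 */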
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

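/*
 * gcm_empty() covers the doubly-degenerate case of no payload and no
 * AAD. GHASH over empty input is zero, so the tag reduces to the
 * encrypted initial counter block; a single AES-ECB pass over the
 * IV/counter (per the nx_wb note in the body) produces it.
 */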
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same CPB region as the GCM AAD and
	 * counter, so it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

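	/*
	 * Tag handling: on encrypt, append the hardware-computed MAC after
	 * the ciphertext in req->dst; on decrypt, pull the transmitted tag
	 * out of req->src and compare it with crypto_memneq() so the check
	 * runs in constant time.
	 */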
mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = crypto_memneq(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}

static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 12,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};