/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8 *in_key,
                              unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        case AES_KEYSIZE_192:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
                break;
        case AES_KEYSIZE_256:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
        memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

        return 0;
}

static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8 *in_key,
                                  unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        char *nonce = nx_ctx->priv.gcm.nonce;
        int rc;

        if (key_len < 4)
                return -EINVAL;

        key_len -= 4;

        rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
        if (rc)
                goto out;

        memcpy(nonce, in_key + key_len, 4);
out:
        return rc;
}
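/*
 * Illustrative sketch, not part of this driver: RFC 4106 hands the 4-byte
 * implicit nonce (the ESP salt) to setkey appended to the AES key, which
 * is why gcm4106_aes_nx_set_key() strips the trailing four bytes before
 * keying the engine.  A hypothetical caller (names here are placeholders)
 * would assemble the key blob like this for AES-128:
 *
 *	u8 blob[AES_KEYSIZE_128 + 4];
 *
 *	memcpy(blob, aes_key, AES_KEYSIZE_128);		// cipher key
 *	memcpy(blob + AES_KEYSIZE_128, salt, 4);	// implicit nonce
 *	rc = crypto_aead_setkey(tfm, blob, sizeof(blob));
 */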
static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}

static int nx_gca(struct nx_crypto_ctx *nx_ctx,
                  struct aead_request *req,
                  u8 *out)
{
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        if (nbytes <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->assoc);
                scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                return 0;
        }

        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->assoc, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

                /* the length is deliberately negative: a negative in/out
                 * length tells the hypervisor that the parameter is a
                 * scatter/gather list, not a linear buffer */
                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
                       csbcpb_aead->cpb.aes_gca.out_pat,
                       AES_BLOCK_SIZE);
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

        return rc;
}
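/*
 * A minimal sketch (a restatement, not extra driver code) of the update
 * protocol that nx_gca() above and gmac()/gcm_aes_nx_crypt() below all
 * follow: data goes to the coprocessor in chunks bounded by both the byte
 * limit and the sg-entry limit, every chunk but the last runs with
 * NX_FDM_INTERMEDIATE set, and each partial result is copied back into the
 * input pattern before the next call continues the operation:
 *
 *	to_process = min_t(u64, total - processed, ap->databytelen);
 *	to_process = min_t(u64, to_process, NX_PAGE_SIZE * (max_sg_len - 1));
 *	... fire the h/w op ...
 *	memcpy(in_pat, out_pat, AES_BLOCK_SIZE);	// chain the state
 *	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 */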
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *nx_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        /* Set GMAC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        /* Copy IV */
        memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->assoc, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                csbcpb->cpb.aes_gcm.bit_length_data = 0;
                csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        return rc;
}
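/*
 * Background note (standard GCM math per NIST SP 800-38D, not extra
 * driver logic): with a zero-length plaintext, GCM degenerates to GMAC,
 * i.e. the tag is
 *
 *	T = E(K, J0) XOR GHASH(H, AAD, <empty ciphertext>)
 *
 * which is why gmac() above runs the engine with bit_length_data = 0 and
 * only bit_length_aad set, and why gcm_empty() below can get away with a
 * single block-cipher call when there is no AAD either: the GHASH term
 * vanishes and the tag is just E(K, J0).
 */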
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
                     int enc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        char out[AES_BLOCK_SIZE];
        struct nx_sg *in_sg, *out_sg;
        int len;

        /* For scenarios where the input message is zero length, AES CTR mode
         * may be used. Set the source data to be a single block (16B) of all
         * zeros, and set the input IV value to be the same as the GMAC IV
         * value. - nx_wb 4.8.1.3 */

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
               sizeof(csbcpb->cpb.aes_ecb.key));
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        len = AES_BLOCK_SIZE;

        /* Encrypt the counter/IV */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
                                 &len, nx_ctx->ap->sglen);

        if (len != AES_BLOCK_SIZE)
                return -EINVAL;

        len = sizeof(out);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
                                  nx_ctx->ap->sglen);

        if (len != sizeof(out))
                return -EINVAL;

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* Copy out the auth tag */
        memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
               crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

        /*
         * The ECB key uses the same region as the GCM AAD and counter, so
         * it's safe to just fill it with zeroes.
         */
        memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

        return rc;
}

static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
        unsigned int processed = 0, to_process;
        unsigned long irq_flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        desc.info = nx_ctx->priv.gcm.iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

        if (nbytes == 0) {
                if (req->assoclen == 0)
                        rc = gcm_empty(req, &desc, enc);
                else
                        rc = gmac(req, &desc);
                if (rc)
                        goto out;
                else
                        goto mac;
        }

        /* Process associated data */
        csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
        if (req->assoclen) {
                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
                if (rc)
                        goto out;
        }

        /* Set flags for encryption */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        if (enc) {
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
                nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
        }

        do {
                to_process = nbytes - processed;

                csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
                desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
                rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
                                       req->src, &to_process, processed,
                                       csbcpb->cpb.aes_gcm.iv_or_cnt);
                if (rc)
                        goto out;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

mac:
        if (enc) {
                /* copy out the auth tag */
                scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
                                 req->dst, nbytes,
                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                 SCATTERWALK_TO_SG);
        } else {
                u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

                scatterwalk_map_and_copy(itag, req->src, nbytes,
                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                 SCATTERWALK_FROM_SG);
                /* compare the tags in constant time to avoid leaking a
                 * timing oracle to the caller */
                rc = crypto_memneq(itag, otag,
                            crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                     -EBADMSG : 0;
        }
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int gcm_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 0);
}

static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 0);
}
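/*
 * Illustrative note (standard GCM counter construction, not extra driver
 * logic): all four entry points above leave a 16-byte J0 block in
 * nx_ctx->priv.gcm.iv.  With a 96-bit nonce, J0 is the nonce followed by
 * a 32-bit big-endian counter starting at 1; gcm_aes_nx_crypt() writes
 * that trailing word via NX_GCM_CTR_OFFSET, which on this big-endian
 * platform yields:
 *
 *	iv[0..11]  = nonce (req->iv, or RFC 4106 salt || req->iv)
 *	iv[12..15] = 0x00 0x00 0x00 0x01
 */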
/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
        .cra_name        = "gcm(aes)",
        .cra_driver_name = "gcm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_aead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_gcm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = AES_BLOCK_SIZE,
                .maxauthsize = AES_BLOCK_SIZE,
                .setkey      = gcm_aes_nx_set_key,
                .setauthsize = gcm_aes_nx_setauthsize,
                .encrypt     = gcm_aes_nx_encrypt,
                .decrypt     = gcm_aes_nx_decrypt,
        }
};

struct crypto_alg nx_gcm4106_aes_alg = {
        .cra_name        = "rfc4106(gcm(aes))",
        .cra_driver_name = "rfc4106-gcm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_nivaead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_gcm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = 8,
                .maxauthsize = AES_BLOCK_SIZE,
                .geniv       = "seqiv",
                .setkey      = gcm4106_aes_nx_set_key,
                .setauthsize = gcm4106_aes_nx_setauthsize,
                .encrypt     = gcm4106_aes_nx_encrypt,
                .decrypt     = gcm4106_aes_nx_decrypt,
        }
};
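/*
 * Usage sketch for reference, written against the AEAD interface this
 * file uses (crypto_aead_crt/req->assoc era); error handling is trimmed
 * and the sg tables, key and IV buffers (assoc_sg, src_sg, dst_sg, key,
 * iv) are placeholders the caller must provide:
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	int rc;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_assoc(req, assoc_sg, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	rc = crypto_aead_encrypt(req);
 */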