/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8            *in_key,
			   unsigned int         key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);

	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[32];
	int rc = 0;
	int len;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	len = sizeof(keys);
	/* Generate K1 and K3 by encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys)) {
		/* take the out: path so XCBC mode is restored on error */
		rc = -EINVAL;
		goto out;
	}

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(keys)) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));
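	/*
	 * At this point the ECB pass above has encrypted the 0x01 and
	 * 0x03 patterns in place, so keys[0] holds K1 and keys[1] holds
	 * K3.  Per the derivation in the comment above, the block to
	 * encrypt under K1 is M[1] ^ E[0] ^ K3: E[0] is all zeroes and
	 * M[1] is a single 0x80 byte followed by zeroes, so the whole
	 * expression reduces to flipping the top bit of keys[1][0].
	 */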
	/* XOR K3 with the padding for a 0 length message */
	keys[1][0] ^= 0x80;

	len = sizeof(keys[1]);

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys[1])) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}

static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int err;

	err = nx_crypto_ctx_aes_xcbc_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	return 0;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof *sctx);

	return 0;
}

static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	struct nx_sg *out_sg;
	u32 to_process = 0, leftover, total;
	unsigned int max_sg_len;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 *  1: <= AES_BLOCK_SIZE: copy into the buffer, return 0
	 *  2: >  AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, nx_ctx->ap->sglen);

	if (data_len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
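	/*
	 * Process the data in block-sized chunks; anything that does not
	 * fill a whole AES block is buffered for a later update or for
	 * final.  For example, a 70 byte update on an empty buffer sends
	 * 64 bytes (four blocks) to the coprocessor and buffers the
	 * remaining 6.
	 */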
	do {
		to_process = total - to_process;
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			data_len = sctx->count;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 &data_len,
						 max_sg_len);
			if (data_len != sctx->count) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - sctx->count;
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 &data_len,
					 max_sg_len);

		if (data_len != to_process - sctx->count) {
			rc = -EINVAL;
			goto out;
		}

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
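/*
 * Finalize the MAC.  Three cases are distinguished below: if the
 * coprocessor has already been used (NX_FDM_CONTINUATION is set), the
 * running CV is carried into this last operation; if no data was ever
 * seen, the hardware cannot be asked for a zero-byte operation, so the
 * RFC 3566 construction in nx_xcbc_empty() is used instead; otherwise
 * the buffered partial block is submitted as the one and only operation.
 */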
static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/*
		 * we've never seen an update, so this is a 0 byte op.  The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* final is represented by continuing the operation and indicating
	 * that this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	len = sctx->count;
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 &len, nx_ctx->ap->sglen);

	if (len != sctx->count) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init2,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
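
/*
 * Usage sketch (illustrative only, not part of this driver): the alg is
 * registered elsewhere in the driver (nx.c) via crypto_register_shash()
 * or a wrapper around it, after which callers reach it through the
 * generic shash API.  The buffers below (key, data, data_len, mac) are
 * hypothetical, caller-provided values; error handling is omitted:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	desc->tfm = tfm;
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_digest(desc, data, data_len, mac);
 *	crypto_free_shash(tfm);
 */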