/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8            *in_key,
			   unsigned int         key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);

	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[32];
	int rc = 0;
	int len;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	len = sizeof(keys);
	/* Generate K1 and K3 encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
				 nx_ctx->ap->sglen);
	if (len != sizeof(keys)) {
		rc = -EINVAL;
		goto out;
	}

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
				  nx_ctx->ap->sglen);
	if (len != sizeof(keys)) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0 length message */
	keys[1][0] ^= 0x80;

	len = sizeof(keys[1]);

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
				 nx_ctx->ap->sglen);
	if (len != sizeof(keys[1])) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}

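/*
 * For reference, the zero-length tag computed by the hardware above can
 * be derived in plain software as in the following sketch. This is
 * illustrative only, not part of the driver: aes_encrypt_block() is a
 * hypothetical one-block AES-128 ECB primitive (key, in, out), standing
 * in for whatever AES implementation is available.
 *
 *	static void xcbc_empty_sw(const u8 *k, u8 *tag)
 *	{
 *		u8 k1[AES_BLOCK_SIZE], k3[AES_BLOCK_SIZE];
 *		u8 m1[AES_BLOCK_SIZE] = { 0x80 };	// 10* padding block
 *		int i;
 *
 *		memset(k1, 0x01, sizeof(k1));
 *		memset(k3, 0x03, sizeof(k3));
 *		aes_encrypt_block(k, k1, k1);	// K1 = E(K, 0x01..01)
 *		aes_encrypt_block(k, k3, k3);	// K3 = E(K, 0x03..03)
 *
 *		// E[0] is all zeroes, so M[1] ^ E[0] ^ K3 == M[1] ^ K3
 *		for (i = 0; i < AES_BLOCK_SIZE; i++)
 *			m1[i] ^= k3[i];
 *		aes_encrypt_block(k1, m1, tag);	// Tag = E[1]
 *	}
 */
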
static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *out_sg;
	int len;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	memset(sctx, 0, sizeof *sctx);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
	memset(nx_ctx->priv.xcbc.key, 0, sizeof(nx_ctx->priv.xcbc.key));

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &len, nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}

static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u32 to_process = 0, leftover, total;
	unsigned int max_sg_len;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
	 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		to_process = total - to_process;
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			data_len = sctx->count;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 &data_len,
						 max_sg_len);
			if (data_len != sctx->count) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - sctx->count;
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 &data_len,
					 max_sg_len);
		if (data_len != to_process - sctx->count) {
			rc = -EINVAL;
			goto out;
		}

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

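/*
 * The first trip through the loop above splits the buffered plus new
 * bytes into a block-aligned to_process chunk and a leftover remainder,
 * always holding back at least one full block so final() has data to
 * finalize with. A minimal sketch of that first-iteration arithmetic
 * (names are local to this example; only reached when total is greater
 * than AES_BLOCK_SIZE):
 *
 *	static void xcbc_split(u32 buffered, u32 len,
 *			       u32 *to_process, u32 *leftover)
 *	{
 *		u32 total = buffered + len;
 *
 *		*to_process = total & ~(AES_BLOCK_SIZE - 1);
 *		*leftover = total - *to_process;
 *
 *		// a block-aligned total would leave nothing to finalize
 *		// with, so keep the last block back
 *		if (!*leftover) {
 *			*to_process -= AES_BLOCK_SIZE;
 *			*leftover = AES_BLOCK_SIZE;
 *		}
 *	}
 *
 * E.g. buffered = 10, len = 38: total = 48 is block-aligned, so
 * to_process = 32 and leftover = 16 (one full block held back).
 */
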
static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/*
		 * we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	len = sctx->count;
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 &len, nx_ctx->ap->sglen);
	if (len != sctx->count) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

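/*
 * A sketch of how a kernel consumer would drive this transform through
 * the generic shash API (error handling elided; "xcbc(aes)" may be
 * served by this driver or by the generic xcbc template, depending on
 * priority). Assumes a kernel where SHASH_DESC_ON_STACK and desc->flags
 * are available:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *	u8 mac[AES_BLOCK_SIZE];
 *
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_init(desc);
 *	crypto_shash_update(desc, data, data_len);
 *	crypto_shash_final(desc, mac);
 *	crypto_free_shash(tfm);
 */
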
struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
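
/*
 * In the full driver this shash_alg is registered from the common setup
 * code in nx.c rather than from a module_init here. For illustration
 * only, a minimal standalone registration would look like:
 *
 *	static int __init nx_xcbc_mod_init(void)
 *	{
 *		return crypto_register_shash(&nx_shash_aes_xcbc_alg);
 *	}
 *	module_init(nx_xcbc_mod_init);
 *
 *	static void __exit nx_xcbc_mod_exit(void)
 *	{
 *		crypto_unregister_shash(&nx_shash_aes_xcbc_alg);
 *	}
 *	module_exit(nx_xcbc_mod_exit);
 */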