/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8            *in_key,
			   unsigned int          key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);

	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[32];
	int rc = 0;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	/* Generate K1 and K3 by encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
				 nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0 length message */
	keys[1][0] ^= 0x80;

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
				 nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);
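	/* Note: op.inlen/op.outlen below are (list start - list end), i.e.
	 * negative element counts; elsewhere in the nx driver this appears
	 * to be the convention for telling the hypervisor that the
	 * parameters are scatter/gather lists rather than linear buffers. */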
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *out_sg;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	memset(sctx, 0, sizeof *sctx);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
	/* clear the whole cached key now that the csbcpb holds a copy */
	memset(nx_ctx->priv.xcbc.key, 0, AES_BLOCK_SIZE);

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  AES_BLOCK_SIZE, nx_ctx->ap->sglen);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}

static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u32 to_process, leftover, total;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
	 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE-multiple chunk of data to
		 * process in this update */
		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be
		 * correct.  So if we happen to get an update that falls on a
		 * block sized boundary, we must save off the last block to
		 * finalize with later. */
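		/* (The last block of an XCBC MAC is additionally masked with
		 * K2 or K3 before the final encryption, which is why that
		 * block must be present in the finalizing operation.) */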
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 sctx->count,
						 max_sg_len);
		}
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 to_process - sctx->count,
					 max_sg_len);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/*
		 * we've never seen an update, so this is a 0 byte op.  The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* final is represented by continuing the operation and indicating
	 * that this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 sctx->count, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
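
/*
 * Illustrative sketch, not part of the driver: one way a kernel consumer
 * might compute an AES-XCBC MAC through the generic shash API, which
 * dispatches to "xcbc-aes-nx" when it is the highest-priority "xcbc(aes)"
 * implementation.  The function and parameter names below are hypothetical
 * and error handling is abbreviated.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

static int __maybe_unused example_xcbc_digest(const u8 *key,
					      unsigned int keylen,
					      const u8 *msg,
					      unsigned int msglen,
					      u8 *mac /* AES_BLOCK_SIZE bytes */)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rc;

	/* "xcbc(aes)" resolves to the registration above when selected */
	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* only AES-128 keys are accepted by nx_xcbc_set_key() */
	rc = crypto_shash_setkey(tfm, key, keylen);
	if (rc)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* init + update + final in one call */
	rc = crypto_shash_digest(desc, msg, msglen, mac);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return rc;
}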