// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"

#define QCE_SECTOR_SIZE		512

static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
	return readl(qce->base + offset);
}

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
	writel(val, qce->base + offset);
}

static inline void qce_write_array(struct qce_device *qce, u32 offset,
				   const u32 *val, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), val[i]);
}

static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), 0);
}

static u32 qce_config_reg(struct qce_device *qce, int little)
{
	u32 beats = (qce->burst_size >> 3) - 1;
	u32 pipe_pair = qce->pipe_pair_id;
	u32 config;

	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
	config &= ~HIGH_SPD_EN_N_SHIFT;

	if (little)
		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

	return config;
}

void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
	__be32 *d = dst;
	const u8 *s = src;
	unsigned int n;

	n = len / sizeof(u32);
	for (; n > 0; n--) {
		*d = cpu_to_be32p((const __u32 *) s);
		s += sizeof(__u32);
		d++;
	}
}

static void qce_setup_config(struct qce_device *qce)
{
	u32 config;

	/* get big endianness */
	config = qce_config_reg(qce, 0);

	/* clear status */
	qce_write(qce, REG_STATUS, 0);
	qce_write(qce, REG_CONFIG, config);
}

static inline void qce_crypto_go(struct qce_device *qce)
{
	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}

#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
	else
		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

	if (IS_CCM(flags) || IS_CMAC(flags)) {
		if (key_size == AES_KEYSIZE_128)
			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
		else if (key_size == AES_KEYSIZE_256)
			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
	}

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

	if (IS_SHA1(flags) || IS_SHA256(flags))
		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
		 IS_CBC(flags) || IS_CTR(flags))
		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CCM(flags))
		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CMAC(flags))
		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

	if (IS_CCM(flags))
		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
	    IS_CMAC(flags))
		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

	return cfg;
}

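/*
 * Program the crypto engine registers for a hash request and kick it off.
 * For CMAC the auth IV, key and byte-count registers are cleared and only
 * the MAC key is loaded; for SHA/SHA-HMAC the intermediate digest and byte
 * counters carried in the request context are restored, so a hash spanning
 * several requests can be resumed.
 */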
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
				u32 totallen, u32 offset)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
	u32 auth_cfg = 0, config;
	unsigned int iv_words;

	/* if not the last, the size has to be on the block boundary */
	if (!rctx->last_blk && req->nbytes % blocksize)
		return -EINVAL;

	qce_setup_config(qce);

	if (IS_CMAC(rctx->flags)) {
		qce_write(qce, REG_AUTH_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
		qce_clear_array(qce, REG_AUTH_IV0, 16);
		qce_clear_array(qce, REG_AUTH_KEY0, 16);
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
	}

	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
		u32 authkey_words = rctx->authklen / sizeof(u32);

		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
				authkey_words);
	}

	if (IS_CMAC(rctx->flags))
		goto go_proc;

	if (rctx->first_blk)
		memcpy(auth, rctx->digest, digestsize);
	else
		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

	if (rctx->first_blk)
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
	else
		qce_write_array(qce, REG_AUTH_BYTECNT0,
				(u32 *)rctx->byte_count, 2);

	auth_cfg = qce_auth_cfg(rctx->flags, 0);

	if (rctx->last_blk)
		auth_cfg |= BIT(AUTH_LAST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

	if (rctx->first_blk)
		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
	qce_write(qce, REG_AUTH_SEG_START, 0);
	qce_write(qce, REG_ENCR_SEG_CFG, 0);
	qce_write(qce, REG_SEG_SIZE, req->nbytes);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
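/*
 * Build the encryption segment configuration for a cipher request: select
 * the algorithm (AES, DES or 3DES), encode the key size, and map the
 * requested block mode (ECB/CBC/CTR/XTS/CCM) onto the corresponding mode
 * field. Returns ~0 for an unsupported mode.
 */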
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags)) {
		if (aes_key_size == AES_KEYSIZE_128)
			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
		else if (aes_key_size == AES_KEYSIZE_256)
			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
	}

	if (IS_AES(flags))
		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	else if (IS_DES(flags) || IS_3DES(flags))
		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

	if (IS_DES(flags))
		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

	if (IS_3DES(flags))
		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

	switch (flags & QCE_MODE_MASK) {
	case QCE_MODE_ECB:
		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CBC:
		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CTR:
		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_XTS:
		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CCM:
		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
		break;
	default:
		return ~0;
	}

	return cfg;
}

static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
	u8 swap[QCE_AES_IV_LENGTH];
	u32 i, j;

	if (ivsize > QCE_AES_IV_LENGTH)
		return;

	memset(swap, 0, QCE_AES_IV_LENGTH);

	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
	     i < QCE_AES_IV_LENGTH; i++, j--)
		swap[i] = src[j];

	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}

static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
		       unsigned int enckeylen, unsigned int cryptlen)
{
	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
	unsigned int xtsdusize;

	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
			       enckeylen / 2);
	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

	/* xts du size 512B */
	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}

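/*
 * Program the crypto engine registers for an skcipher request and kick it
 * off: load the byte-swapped key and IV, set up the encryption segment
 * configuration, the counter masks for CTR mode, and the XTS tweak key and
 * DU size where needed.
 */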
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
				   u32 totallen, u32 offset)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
	unsigned int enckey_words, enciv_words;
	unsigned int keylen;
	u32 encr_cfg = 0, auth_cfg = 0, config;
	unsigned int ivsize = rctx->ivsize;
	unsigned long flags = rctx->flags;

	qce_setup_config(qce);

	if (IS_XTS(flags))
		keylen = ctx->enc_keylen / 2;
	else
		keylen = ctx->enc_keylen;

	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
	enckey_words = keylen / sizeof(u32);

	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	encr_cfg = qce_encr_cfg(flags, keylen);

	if (IS_DES(flags)) {
		enciv_words = 2;
		enckey_words = 2;
	} else if (IS_3DES(flags)) {
		enciv_words = 2;
		enckey_words = 6;
	} else if (IS_AES(flags)) {
		if (IS_XTS(flags))
			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
				   rctx->cryptlen);
		enciv_words = 4;
	} else {
		return -EINVAL;
	}

	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

	if (!IS_ECB(flags)) {
		if (IS_XTS(flags))
			qce_xts_swapiv(enciv, rctx->iv, ivsize);
		else
			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
	}

	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);

	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

	if (IS_CTR(flags)) {
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	qce_write(qce, REG_SEG_SIZE, totallen);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif

int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
	      u32 offset)
{
	switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return qce_setup_regs_skcipher(async_req, totallen, offset);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	case CRYPTO_ALG_TYPE_AHASH:
		return qce_setup_regs_ahash(async_req, totallen, offset);
#endif
	default:
		return -EINVAL;
	}
}

#define STATUS_ERRORS	\
		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))

int qce_check_status(struct qce_device *qce, u32 *status)
{
	int ret = 0;

	*status = qce_read(qce, REG_STATUS);

	/*
	 * Don't use the result dump status; the operation may not be complete.
	 * Instead, use the status we just read from the device. If we ever
	 * need result_status from the result dump, it has to be byte-swapped,
	 * since we set the device to little endian.
	 */
	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
		ret = -ENXIO;

	return ret;
}

void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val;

	val = qce_read(qce, REG_VERSION);
	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}