/openbmc/qemu/crypto/
aes.c
  1016  const AESState *rk, bool swap)                in aesenc_SB_SR_AK_swap() argument
  1043  ret->v = t.v ^ rk->v;                         in aesenc_SB_SR_AK_swap()
  1061  const AESState *rk, bool swap)                in aesenc_SB_SR_MC_AK_swap() argument
  1096  r->w[swap_w ^ 0] = rk->w[swap_w ^ 0] ^ w0;    in aesenc_SB_SR_MC_AK_swap()
  1097  r->w[swap_w ^ 1] = rk->w[swap_w ^ 1] ^ w1;    in aesenc_SB_SR_MC_AK_swap()
  1098  r->w[swap_w ^ 2] = rk->w[swap_w ^ 2] ^ w2;    in aesenc_SB_SR_MC_AK_swap()
  1099  r->w[swap_w ^ 3] = rk->w[swap_w ^ 3] ^ w3;    in aesenc_SB_SR_MC_AK_swap()
  1103  const AESState *rk)                           in aesenc_SB_SR_MC_AK_gen() argument
  1105  aesenc_SB_SR_MC_AK_swap(r, st, rk, false);    in aesenc_SB_SR_MC_AK_gen()
  1109  const AESState *rk)                           in aesenc_SB_SR_MC_AK_genrev() argument
  [all …]
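The suffixes in these helper names spell out the AES round primitives each one fuses: SB = SubBytes, SR = ShiftRows, MC = MixColumns, AK = AddRoundKey; the swap/genrev variants differ only in byte order. For orientation, a minimal sketch (not a QEMU call site) of how the wrappers declared in crypto/aes-round.h compose into a full AES-128 encryption:

    /* AES-128: initial AddRoundKey, nine fused SB+SR+MC+AK rounds, and a
     * final SB+SR+AK round that skips MixColumns. */
    static void aes128_encrypt_sketch(AESState *out, const AESState *in,
                                      const AESState rk[11])
    {
        AESState st;
        int i;

        st.v = in->v ^ rk[0].v;                     /* initial AddRoundKey */
        for (i = 1; i < 10; i++) {
            aesenc_SB_SR_MC_AK(&st, &st, &rk[i], false);
        }
        aesenc_SB_SR_AK(out, &st, &rk[10], false);  /* last round: no MC */
    }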
/openbmc/linux/crypto/
sm4.c
   104  static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)  in sm4_round() argument
   106  return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);                in sm4_round()
   122  u32 rk[4];                                                 in sm4_expandkey() local
   129  rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];               in sm4_expandkey()
   130  rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];               in sm4_expandkey()
   131  rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];               in sm4_expandkey()
   132  rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];               in sm4_expandkey()
   135  rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);   in sm4_expandkey()
   136  rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);   in sm4_expandkey()
   137  rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);   in sm4_expandkey()
   [all …]
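The key schedule above is a sliding window: each new round key is the word four positions back XORed with sm4_key_sub() of the intervening three words and a round constant ck[i]. Per the SM4 specification, sm4_key_sub() is the byte-wise S-box followed by the key-schedule linear transform L'(B) = B ^ (B <<< 13) ^ (B <<< 23). A self-contained sketch of that substitution, with the 256-byte S-box left as an assumed external table:

    #include <stdint.h>

    extern const uint8_t sm4_sbox[256];   /* assumed: the standard SM4 S-box */

    static uint32_t rol32(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* tau: apply the S-box to each byte of the word. */
    static uint32_t sm4_tau(uint32_t x)
    {
        return ((uint32_t)sm4_sbox[(x >> 24) & 0xff] << 24) |
               ((uint32_t)sm4_sbox[(x >> 16) & 0xff] << 16) |
               ((uint32_t)sm4_sbox[(x >>  8) & 0xff] <<  8) |
                (uint32_t)sm4_sbox[x & 0xff];
    }

    /* T': what sm4_key_sub() computes -- tau, then L'(B) = B ^ B<<<13 ^ B<<<23. */
    static uint32_t sm4_key_sub_sketch(uint32_t x)
    {
        uint32_t b = sm4_tau(x);
        return b ^ rol32(b, 13) ^ rol32(b, 23);
    }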
/openbmc/qemu/include/crypto/
aes-round.h
    46  const AESState *rk);
    48  const AESState *rk);
    51  const AESState *rk, bool be)              in aesenc_SB_SR_AK() argument
    54  aesenc_SB_SR_AK_accel(r, st, rk, be);     in aesenc_SB_SR_AK()
    56  aesenc_SB_SR_AK_gen(r, st, rk);           in aesenc_SB_SR_AK()
    58  aesenc_SB_SR_AK_genrev(r, st, rk);        in aesenc_SB_SR_AK()
    67  const AESState *rk);
    69  const AESState *rk);
    72  const AESState *rk, bool be)              in aesenc_SB_SR_MC_AK() argument
    75  aesenc_SB_SR_MC_AK_accel(r, st, rk, be);  in aesenc_SB_SR_MC_AK()
    [all …]
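The inline wrappers in this header select between the three backends visible in the matches: a host-accelerated path, a generic table walk in native byte order, and a byte-reversed table walk. A sketch of the branch structure implied by the three callees, assuming QEMU's HAVE_AES_ACCEL and HOST_BIG_ENDIAN feature macros:

    static inline void aesenc_SB_SR_AK_sketch(AESState *r, const AESState *st,
                                              const AESState *rk, bool be)
    {
        if (HAVE_AES_ACCEL) {                 /* host AES instructions */
            aesenc_SB_SR_AK_accel(r, st, rk, be);
        } else if (HOST_BIG_ENDIAN == be) {   /* requested order == native */
            aesenc_SB_SR_AK_gen(r, st, rk);
        } else {                              /* byte-reversed generic path */
            aesenc_SB_SR_AK_genrev(r, st, rk);
        }
    }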
/openbmc/u-boot/arch/x86/cpu/quark/
smc.c
   256  uint8_t rk; /* rank counter */                       in ddrphy_init() local
   841  for (rk = 0; rk < NUM_RANKS; rk++) {                 in ddrphy_init()
   842  if (mrc_params->rank_enables & (1 << rk)) {          in ddrphy_init()
   843  set_wclk(ch, rk, ddr_wclk[PLATFORM_ID]);             in ddrphy_init()
   845  set_wctl(ch, rk, ddr_wctl[PLATFORM_ID]);             in ddrphy_init()
   847  set_wctl(ch, rk, ddr_wclk[PLATFORM_ID] + HALF_CLK);  in ddrphy_init()
  1353  uint8_t ch, rk, bl;                                  in restore_timings() local
  1357  for (rk = 0; rk < NUM_RANKS; rk++) {                 in restore_timings()
  1359  set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);          in restore_timings()
  1360  set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);          in restore_timings()
  [all …]
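In this file rk is a DRAM rank index, not a round key. The matches trace the MRC's usual walk over the hardware: a rank loop gated by the rank_enables bitmask, and for the timing restore an additional byte-lane loop. A sketch of the restore pattern, with the outer channel loop and the NUM_CHANNELS/NUM_BYTE_LANES bounds assumed by analogy with NUM_RANKS:

    /* Replay the trained receive-enable and read-DQS delays per channel,
     * rank, and byte lane from the saved-timings structure mt. */
    for (ch = 0; ch < NUM_CHANNELS; ch++) {
        for (rk = 0; rk < NUM_RANKS; rk++) {
            if (!(mrc_params->rank_enables & (1 << rk)))
                continue;               /* skip unpopulated ranks */
            for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
                set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
                set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
            }
        }
    }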
/openbmc/linux/arch/arm64/crypto/
aes-neonbs-glue.c
    26  asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
    28  asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
    30  asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
    33  asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
    36  asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
    39  asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
    41  asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
    45  asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
    47  asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
    49  asmlinkage void neon_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
    [all …]
aes-ce.S
    28  .macro load_round_keys, rounds, rk        argument
    32  ld1 {v17.4s-v18.4s}, [\rk], #32
    33  1111: ld1 {v19.4s-v20.4s}, [\rk], #32
    34  2222: ld1 {v21.4s-v24.4s}, [\rk], #64
    35  ld1 {v25.4s-v28.4s}, [\rk], #64
    36  ld1 {v29.4s-v31.4s}, [\rk]
    40  .macro enc_prepare, rounds, rk, temp
    41  mov \temp, \rk
    46  .macro enc_switch_key, rounds, rk, temp
    47  mov \temp, \rk
    [all …]
aes-neon.S
    98  .macro do_block, enc, in, rounds, rk, rkp, i
    99  ld1 {v15.4s}, [\rk]
   100  add \rkp, \rk, #16
   114  .macro encrypt_block, in, rounds, rk, rkp, i
   115  do_block 1, \in, \rounds, \rk, \rkp, \i
   118  .macro decrypt_block, in, rounds, rk, rkp, i
   119  do_block 0, \in, \rounds, \rk, \rkp, \i
   205  .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
   206  ld1 {v15.4s}, [\rk]
   207  add \rkp, \rk, #16
   [all …]
aes-cipher-core.S
    14  rk .req x0
    57  ldp \out0, \out1, [rk], #8
    87  ldp w8, w9, [rk], #16
    88  ldp w10, w11, [rk, #-8]
aes-cipher-glue.c
    12  asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
    13  asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
/openbmc/qemu/target/riscv/
vcrypto_helper.c
   320  uint32_t rk[8], tmp;                                              local
   326  rk[0] = vs2[i * 4 + H4(0)];
   327  rk[1] = vs2[i * 4 + H4(1)];
   328  rk[2] = vs2[i * 4 + H4(2)];
   329  rk[3] = vs2[i * 4 + H4(3)];
   330  tmp = ror32(rk[3], 8);
   332  rk[4] = rk[0] ^ (((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |
   337  rk[5] = rk[1] ^ rk[4];
   338  rk[6] = rk[2] ^ rk[5];
   339  rk[7] = rk[3] ^ rk[6];
   [all …]
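These lines are one round of the AES-128 key schedule (the vector vaeskf1 helper): ror32(rk[3], 8) is RotWord, the AES_sbox lookups are SubWord, and the remaining three words chain by XOR. A standalone sketch of the same step, assuming an external AES_sbox[256] table and little-endian word packing (so the round constant 0x01, 0x02, ..., 0x36 sits in the low byte):

    #include <stdint.h>

    extern const uint8_t AES_sbox[256];   /* assumed: standard AES S-box */

    static uint32_t ror32(uint32_t x, int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* Derive round key i+1 from round key i (AES-128 key schedule step). */
    static void aes128_expand_round(uint32_t next[4], const uint32_t prev[4],
                                    uint32_t rcon)
    {
        uint32_t tmp = ror32(prev[3], 8);                       /* RotWord */
        tmp = ((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |  /* SubWord */
              ((uint32_t)AES_sbox[(tmp >> 16) & 0xff] << 16) |
              ((uint32_t)AES_sbox[(tmp >>  8) & 0xff] <<  8) |
               (uint32_t)AES_sbox[tmp & 0xff];

        next[0] = prev[0] ^ tmp ^ rcon;   /* fold in the round constant */
        next[1] = prev[1] ^ next[0];
        next[2] = prev[2] ^ next[1];
        next[3] = prev[3] ^ next[2];
    }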
/openbmc/linux/arch/arm/crypto/
aes-neonbs-glue.c
    29  asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
    31  asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
    33  asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
    36  asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
    39  asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
    42  asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
    44  asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
    49  u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
    72  struct crypto_aes_ctx rk;                      in aesbs_setkey() local
    75  err = aes_expandkey(&rk, in_key, key_len);     in aesbs_setkey()
    [all …]
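The tail of this excerpt shows the setkey flow: expand the user key with the generic aes_expandkey(), then hand the encryption schedule to the assembly to repack into bit-sliced form. A plausible sketch of how that continues, based only on the declarations above; the ctx layout beyond the rk array is an assumption:

    static int aesbs_setkey_sketch(struct aesbs_ctx *ctx, const u8 *in_key,
                                   unsigned int key_len)
    {
        struct crypto_aes_ctx rk;
        int err = aes_expandkey(&rk, in_key, key_len);

        if (err)
            return err;

        ctx->rounds = 6 + key_len / 4;   /* 10/12/14 for 128/192/256-bit keys */

        kernel_neon_begin();
        aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);  /* bit-slice */
        kernel_neon_end();

        return 0;
    }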
sha2-ce-core.S
    18  rk .req r3
    35  vld1.32 {k\ev}, [rk, :128]!
    90  adr rk, .Lsha256_rcon
    91  vld1.32 {k0}, [rk, :128]!
/openbmc/qemu/host/include/i386/host/crypto/
aes-round.h
    47  const AESState *rk, bool be)   in aesenc_SB_SR_AK_accel() argument
    50  __m128i k = (__m128i)rk->v;    in aesenc_SB_SR_AK_accel()
    65  const AESState *rk, bool be)   in aesenc_SB_SR_MC_AK_accel() argument
    68  __m128i k = (__m128i)rk->v;    in aesenc_SB_SR_MC_AK_accel()
    98  const AESState *rk, bool be)   in aesdec_ISB_ISR_AK_accel() argument
   101  __m128i k = (__m128i)rk->v;    in aesdec_ISB_ISR_AK_accel()
   116  const AESState *rk, bool be)   in aesdec_ISB_ISR_AK_IMC_accel() argument
   119  __m128i k = (__m128i)rk->v;    in aesdec_ISB_ISR_AK_IMC_accel()
   136  const AESState *rk, bool be)   in aesdec_ISB_ISR_IMC_AK_accel() argument
   139  __m128i k = (__m128i)rk->v;    in aesdec_ISB_ISR_IMC_AK_accel()
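On x86 the two encrypt helpers reduce to single AES-NI instructions on a 128-bit __m128i state (the decrypt variants need a little more work because AESDEC fixes the order of InvMixColumns and AddRoundKey). A minimal user-space illustration of those two primitives, assuming an AES-NI-capable CPU and compilation with -maes:

    #include <immintrin.h>

    /* Full middle round: ShiftRows, SubBytes, MixColumns, AddRoundKey --
     * what aesenc_SB_SR_MC_AK_accel() boils down to. */
    static inline __m128i aes_round_sketch(__m128i state, __m128i round_key)
    {
        return _mm_aesenc_si128(state, round_key);
    }

    /* Final round (no MixColumns), matching aesenc_SB_SR_AK_accel(). */
    static inline __m128i aes_final_round_sketch(__m128i state, __m128i round_key)
    {
        return _mm_aesenclast_si128(state, round_key);
    }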
/openbmc/qemu/host/include/ppc/host/crypto/
aes-round.h
   109  const AESState *rk, bool be)   in aesenc_SB_SR_AK_accel() argument
   114  k = aes_accel_ld(rk, be);      in aesenc_SB_SR_AK_accel()
   121  const AESState *rk, bool be)   in aesenc_SB_SR_MC_AK_accel() argument
   126  k = aes_accel_ld(rk, be);      in aesenc_SB_SR_MC_AK_accel()
   144  const AESState *rk, bool be)   in aesdec_ISB_ISR_AK_accel() argument
   149  k = aes_accel_ld(rk, be);      in aesdec_ISB_ISR_AK_accel()
   156  const AESState *rk, bool be)   in aesdec_ISB_ISR_AK_IMC_accel() argument
   161  k = aes_accel_ld(rk, be);      in aesdec_ISB_ISR_AK_IMC_accel()
   168  const AESState *rk, bool be)   in aesdec_ISB_ISR_IMC_AK_accel() argument
   173  k = aes_accel_ld(rk, be);      in aesdec_ISB_ISR_IMC_AK_accel()
/openbmc/qemu/host/include/aarch64/host/crypto/
aes-round.h
   101  const AESState *rk, bool be)      in aesenc_SB_SR_AK_accel() argument
   113  ret->v = (AESStateVec)t ^ rk->v;  in aesenc_SB_SR_AK_accel()
   118  const AESState *rk, bool be)      in aesenc_SB_SR_MC_AK_accel() argument
   130  ret->v = (AESStateVec)t ^ rk->v;  in aesenc_SB_SR_MC_AK_accel()
   150  const AESState *rk, bool be)      in aesdec_ISB_ISR_AK_accel() argument
   162  ret->v = (AESStateVec)t ^ rk->v;  in aesdec_ISB_ISR_AK_accel()
   167  const AESState *rk, bool be)      in aesdec_ISB_ISR_AK_IMC_accel() argument
   170  uint8x16_t k = (uint8x16_t)rk->v; in aesdec_ISB_ISR_AK_IMC_accel()
   190  const AESState *rk, bool be)      in aesdec_ISB_ISR_IMC_AK_accel() argument
   202  ret->v = (AESStateVec)t ^ rk->v;  in aesdec_ISB_ISR_IMC_AK_accel()
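ARMv8's AESE instruction folds AddRoundKey in before SubBytes/ShiftRows, the reverse of x86's ordering; that is why these helpers run the instruction against a zero key and XOR the real round key afterwards (the `t ^ rk->v` lines). A user-space sketch of the two encrypt variants, assuming the crypto extension is available (e.g. -march=armv8-a+crypto):

    #include <arm_neon.h>

    /* AESE(st, 0) leaves pure SubBytes + ShiftRows, so the round key can
     * be added separately, matching aesenc_SB_SR_AK_accel(). */
    static inline uint8x16_t aesenc_sb_sr_ak_sketch(uint8x16_t st, uint8x16_t rk)
    {
        uint8x16_t t = vaeseq_u8(st, vdupq_n_u8(0));
        return veorq_u8(t, rk);                     /* AddRoundKey */
    }

    /* Full middle round: add MixColumns via AESMC before the key XOR. */
    static inline uint8x16_t aesenc_sb_sr_mc_ak_sketch(uint8x16_t st, uint8x16_t rk)
    {
        uint8x16_t t = vaesmcq_u8(vaeseq_u8(st, vdupq_n_u8(0)));
        return veorq_u8(t, rk);
    }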
/openbmc/linux/include/crypto/
aria.h
   407  static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2,  in aria_add_round_key() argument
   410  *t0 ^= rk[0];      in aria_add_round_key()
   411  *t1 ^= rk[1];      in aria_add_round_key()
   412  *t2 ^= rk[2];      in aria_add_round_key()
   413  *t3 ^= rk[3];      in aria_add_round_key()
   434  static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)  in aria_gsrk() argument
   439  rk[0] = (x[0]) ^   in aria_gsrk()
   442  rk[1] = (x[1]) ^   in aria_gsrk()
   445  rk[2] = (x[2]) ^   in aria_gsrk()
   448  rk[3] = (x[3]) ^   in aria_gsrk()
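aria_gsrk() generates a round key as rk = x ^ (y >>> n), where the rotation runs over the whole 128-bit value rather than each 32-bit word, which is why every output word mixes two words of y. A sketch of that wide rotate-and-XOR with w[0] as the most significant word; ARIA's rotation amounts (19, 31, 67, 97, 109) are never multiples of 32, so the r == 0 corner is ignored here:

    #include <stdint.h>

    /* GSRK: rk = x ^ (y rotated right by n bits, over 128 bits). */
    static void aria_gsrk_sketch(uint32_t rk[4], const uint32_t x[4],
                                 const uint32_t y[4], unsigned int n)
    {
        unsigned int q = n / 32;   /* whole words of rotation */
        unsigned int r = n % 32;   /* leftover bit shift */
        int i;

        for (i = 0; i < 4; i++) {
            rk[i] = x[i] ^
                    (y[(i + 4 - q) % 4] >> r) ^        /* main word */
                    (y[(i + 3 - q) % 4] << (32 - r));  /* carried-in bits */
        }
    }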
/openbmc/qemu/tests/tcg/loongarch64/
test_div.c
     7  uint ## M ## _t rk, \
    14  : "r"(rj), "r"(rk) \
    21  uint ## M ## _t rk, \
    28  : "r"(rj), "r"(rk) \
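This test harness token-pastes one wrapper per instruction and bit width; rj and rk are the two source-register operands fed through inline assembly. A hand-expanded sketch of what one instance plausibly looks like, with the 64-bit unsigned divide chosen purely for illustration:

    #include <stdint.h>

    static uint64_t test_div_du(uint64_t rj, uint64_t rk)
    {
        uint64_t rd;

        /* rd = rj / rk, unsigned 64-bit division */
        asm volatile("div.du %0, %1, %2"
                     : "=r"(rd)
                     : "r"(rj), "r"(rk));
        return rd;
    }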
/openbmc/qemu/target/loongarch/tcg/insn_trans/
trans_fmemory.c.inc
    46  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    63  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    78  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    96  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
   112  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
   130  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
trans_memory.c.inc
    33  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    46  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    58  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    72  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    86  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
    99  TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
/openbmc/qemu/target/loongarch/tcg/
op_helper.c
    48  void helper_asrtle_d(CPULoongArchState *env, target_ulong rj, target_ulong rk)  in helper_asrtle_d() argument
    50  if (rj > rk) {    in helper_asrtle_d()
    56  void helper_asrtgt_d(CPULoongArchState *env, target_ulong rj, target_ulong rk)  in helper_asrtgt_d() argument
    58  if (rj <= rk) {   in helper_asrtgt_d()
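These back LoongArch's bound-check-assert instructions: ASRTLE.D asserts rj <= rk and ASRTGT.D asserts rj > rk, trapping when the assertion fails (hence the inverted conditions in the helpers). A sketch of the elided trap path, with the exception constant and raise helper named as assumptions:

    void helper_asrtle_d(CPULoongArchState *env, target_ulong rj, target_ulong rk)
    {
        if (rj > rk) {
            /* assumed: raise the bound-check exception at the guest PC */
            do_raise_exception(env, EXCCODE_BCE, GETPC());
        }
    }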
/openbmc/linux/arch/x86/crypto/
aria-gfni-avx512-asm_64.S
   272  t0, rk, round) \
   274  vpbroadcastb ((round * 16) + 3)(rk), t0; \
   276  vpbroadcastb ((round * 16) + 2)(rk), t0; \
   278  vpbroadcastb ((round * 16) + 1)(rk), t0; \
   280  vpbroadcastb ((round * 16) + 0)(rk), t0; \
   282  vpbroadcastb ((round * 16) + 7)(rk), t0; \
   284  vpbroadcastb ((round * 16) + 6)(rk), t0; \
   286  vpbroadcastb ((round * 16) + 5)(rk), t0; \
   288  vpbroadcastb ((round * 16) + 4)(rk), t0; \
   290  vpbroadcastb ((round * 16) + 11)(rk), t0; \
   [all …]
aria-aesni-avx2-asm_64.S
   286  t0, rk, idx, round) \
   288  vpbroadcastb ((round * 16) + idx + 3)(rk), t0; \
   290  vpbroadcastb ((round * 16) + idx + 2)(rk), t0; \
   292  vpbroadcastb ((round * 16) + idx + 1)(rk), t0; \
   294  vpbroadcastb ((round * 16) + idx + 0)(rk), t0; \
   296  vpbroadcastb ((round * 16) + idx + 7)(rk), t0; \
   298  vpbroadcastb ((round * 16) + idx + 6)(rk), t0; \
   300  vpbroadcastb ((round * 16) + idx + 5)(rk), t0; \
   302  vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \
   464  mem_tmp, rk, round) \      argument
   [all …]
sm4_aesni_avx2_glue.c
    22  asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
    24  asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
    26  asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
aria-aesni-avx-asm_64.S
   270  t0, t1, t2, rk, \
   273  vbroadcastss ((round * 16) + idx + 0)(rk), t0; \
   285  vbroadcastss ((round * 16) + idx + 4)(rk), t0; \
   423  mem_tmp, rk, round) \      argument
   426  y0, y7, y2, rk, 8, round); \
   441  y0, y7, y2, rk, 0, round); \
   478  mem_tmp, rk, round) \      argument
   481  y0, y7, y2, rk, 8, round); \
   496  y0, y7, y2, rk, 0, round); \
   533  mem_tmp, rk, round, last_round) \      argument
   [all …]
/openbmc/linux/arch/loongarch/include/asm/
inst.h
   324  unsigned int rk : 5;     member
   331  unsigned int rk : 5;     member
   487  u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk);
   690  enum loongarch_gpr rk) \
   695  insn->reg3_format.rk = rk; \
   735  enum loongarch_gpr rk, \
   742  insn->reg3sa2_format.rk = rk; \
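Here rk is a 5-bit register field in LoongArch's three-register (3R) instruction encoding: rd in bits 4:0, rj in bits 9:5, rk in bits 14:10, opcode above. A sketch of that layout and the emit pattern the DEF_EMIT_REG3_FORMAT macros generate, relying on the LSB-first bitfield ordering Linux uses on this architecture (an assumption of this sketch):

    #include <stdint.h>

    union loongarch_insn_sketch {
        uint32_t word;
        struct {                    /* 3R format, least-significant field first */
            uint32_t rd     : 5;    /* bits  4:0  */
            uint32_t rj     : 5;    /* bits  9:5  */
            uint32_t rk     : 5;    /* bits 14:10 */
            uint32_t opcode : 17;   /* bits 31:15 */
        } reg3;
    };

    /* e.g. the body behind larch_insn_gen_or(): fill the three fields. */
    static uint32_t emit_reg3_sketch(uint32_t opcode, unsigned int rd,
                                     unsigned int rj, unsigned int rk)
    {
        union loongarch_insn_sketch insn = { .word = 0 };

        insn.reg3.opcode = opcode;
        insn.reg3.rd = rd;
        insn.reg3.rj = rj;
        insn.reg3.rk = rk;
        return insn.word;
    }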