/openbmc/qemu/target/riscv/ |
bitmanip_helper.c
    69  dup_const(MO_8, 0x44),
    70  dup_const(MO_8, 0x30),
    71  dup_const(MO_16, 0x0f00),
    72  dup_const(MO_32, 0xff0000)
|
/openbmc/qemu/tcg/ |
tcg-op-gvec.c
    385   uint64_t (dup_const)(unsigned vece, uint64_t c)
    551   in_c = dup_const(vece, in_c);  in do_dup()
    555   } else if (in_c == dup_const(MO_8, in_c)) {  in do_dup()
    1874  TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));  in tcg_gen_vec_add8_i64()
    1880  TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));  in tcg_gen_vec_add8_i32()
    1899  TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));  in tcg_gen_vec_add16_i64()
    2057  TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));  in tcg_gen_vec_sub8_i64()
    2063  TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));  in tcg_gen_vec_sub8_i32()
    2082  TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));  in tcg_gen_vec_sub16_i64()
    2478  TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));  in tcg_gen_vec_neg8_i64()
    [all …]
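Line 385 above is the out-of-line definition of dup_const(): it replicates a constant of element size vece (MO_8, MO_16, MO_32 or MO_64) across every lane of a 64-bit value, which is how masks such as dup_const(MO_8, 0x80) in tcg_gen_vec_add8_i64() are built. The standalone sketch below shows that behaviour under the assumption that the MO_* values encode log2 of the element width in bytes, as in QEMU's MemOp; dup_const_sketch() and the local enum are illustrative stand-ins, not the QEMU source.

    /* Illustrative sketch of the replication implied by the prototype at
     * line 385; not copied from tcg-op-gvec.c. */
    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    enum { MO_8, MO_16, MO_32, MO_64 };

    static uint64_t dup_const_sketch(unsigned vece, uint64_t c)
    {
        switch (vece) {
        case MO_8:  return 0x0101010101010101ull * (uint8_t)c;   /* byte in every lane  */
        case MO_16: return 0x0001000100010001ull * (uint16_t)c;  /* half in every lane  */
        case MO_32: return 0x0000000100000001ull * (uint32_t)c;  /* word in both halves */
        default:    return c;                                    /* MO_64: unchanged    */
        }
    }

    int main(void)
    {
        /* e.g. the per-byte sign mask used by tcg_gen_vec_add8_i64() above */
        printf("%016" PRIx64 "\n", dup_const_sketch(MO_8, 0x80));  /* 8080808080808080 */
        return 0;
    }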
|
tcg.c
    1903  val = dup_const(vece, val);  in tcg_constant_vec()
    4451  if (val == dup_const(MO_8, val)) {  in temp_load()
    4453  } else if (val == dup_const(MO_16, val)) {  in temp_load()
    4455  } else if (val == dup_const(MO_32, val)) {  in temp_load()
    5244  if (val == dup_const(MO_8, val)) {  in tcg_reg_alloc_dup2()
    5246  } else if (val == dup_const(MO_16, val)) {  in tcg_reg_alloc_dup2()
    5248  } else if (val == dup_const(MO_32, val)) {  in tcg_reg_alloc_dup2()
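The temp_load() and tcg_reg_alloc_dup2() hits above repeat the comparison val == dup_const(MO_n, val) to find the narrowest element size whose replication reproduces a constant, so the backend can emit a cheaper dup of a byte, half or word. A small sketch of that cascade, reusing the illustrative dup_const_sketch() helper and MO_* values from the previous block (an assumption, not QEMU code):

    /* Return the narrowest illustrative MO_* size whose replication
     * reproduces val; MO_64 means no smaller pattern exists. */
    static unsigned narrowest_dup_vece(uint64_t val)
    {
        if (val == dup_const_sketch(MO_8, val)) {
            return MO_8;        /* e.g. 0x4242424242424242 */
        } else if (val == dup_const_sketch(MO_16, val)) {
            return MO_16;       /* e.g. 0x1234123412341234 */
        } else if (val == dup_const_sketch(MO_32, val)) {
            return MO_32;       /* e.g. 0xdeadbeefdeadbeef */
        }
        return MO_64;
    }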
|
optimize.c
    1655  t = dup_const(TCGOP_VECE(op), t);  in fold_dup()
|
/openbmc/qemu/target/arm/tcg/ |
gengvec.c
    277   tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));  in gen_srshr8_i64()
    287   tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));  in gen_srshr16_i64()
    471   tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));  in gen_urshr8_i64()
    481   tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));  in gen_urshr16_i64()
    670   uint64_t mask = dup_const(MO_8, 0xff >> shift);  in gen_shr8_ins_i64()
    681   uint64_t mask = dup_const(MO_16, 0xffff >> shift);  in gen_shr16_ins_i64()
    760   uint64_t mask = dup_const(MO_8, 0xff << shift);  in gen_shl8_ins_i64()
    771   uint64_t mask = dup_const(MO_16, 0xffff << shift);  in gen_shl16_ins_i64()
    1937  tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));  in gen_shadd8_i64()
    1949  tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));  in gen_shadd16_i64()
    [all …]
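The gen_shr8_ins_i64()/gen_shl8_ins_i64() hits above build per-lane masks with dup_const so that a single 64-bit shift can stand in for eight independent byte shifts. The scalar sketch below illustrates that idea for the shift-right-and-insert case; it is an interpretation of the intent, not the gengvec.c code, and shr8_ins()/dup8() are hypothetical helpers.

    #include <stdint.h>

    /* stand-in for dup_const(MO_8, c) */
    static uint64_t dup8(uint8_t c) { return 0x0101010101010101ull * c; }

    /* Shift each byte lane of a right by shift (1..7) and insert the
     * result into d, keeping d's bits where the mask is clear. */
    static uint64_t shr8_ins(uint64_t d, uint64_t a, unsigned shift)
    {
        uint64_t mask = dup8(0xff >> shift);  /* bits that stay inside a lane   */
        uint64_t t = (a >> shift) & mask;     /* drop bits leaked from the lane above */
        return (d & ~mask) | t;
    }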
|
gengvec64.c
    54  uint64_t mask = dup_const(MO_8, 0xff >> sh);  in gen_xar8_i64()
    67  uint64_t mask = dup_const(MO_16, 0xffff >> sh);  in gen_xar16_i64()
|
sve_helper.c
    2677  mm = dup_const(MO_8, mm);  in HELPER()
    2692  mm = dup_const(MO_16, mm);  in HELPER()
    2707  mm = dup_const(MO_32, mm);  in HELPER()
    2734  val = dup_const(MO_8, val);  in HELPER()
    2746  val = dup_const(MO_16, val);  in HELPER()
    2758  val = dup_const(MO_32, val);  in HELPER()
    7165  uint64_t ones = dup_const(esz, 1);  in do_match2()
    7169  cmp1 = dup_const(esz, n);  in do_match2()
    7297  const uint64_t mask = dup_const(MO_8, 0x7f);  in do_histseg_cnt()
    7300  cmp1 = dup_const(MO_8, n);  in do_histseg_cnt()
    [all …]
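In the do_match2() hits above, dup_const(esz, n) broadcasts the element being searched for and dup_const(esz, 1) supplies a matching "ones" constant. The usual reason for that pairing is the branch-free "does any lane match" test sketched below for byte elements; this is the textbook SWAR trick, not necessarily the exact sve_helper.c code, and any_byte_matches() is a hypothetical name.

    #include <stdbool.h>
    #include <stdint.h>

    /* True iff some byte of x equals n; no per-byte loop. */
    static bool any_byte_matches(uint64_t x, uint8_t n)
    {
        uint64_t cmp  = 0x0101010101010101ull * n;  /* like dup_const(MO_8, n) */
        uint64_t ones = 0x0101010101010101ull;      /* like dup_const(MO_8, 1) */
        uint64_t t    = x ^ cmp;                    /* a zero byte marks a match */
        /* classic "any zero byte" detector */
        return ((t - ones) & ~t & (ones << 7)) != 0;
    }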
|
mve_helper.c
    826   #define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
    827   #define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))
    838   #define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
    839   #define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))
    2087  shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
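The DO_FABSH/DO_FNEGH macros above implement fabs and negation on packed half-precision values by clearing or flipping each lane's sign bit with a dup_const mask. A standalone illustration, with dup16() as a local stand-in for dup_const(MO_16, c) and hypothetical function names:

    #include <stdint.h>

    static uint64_t dup16(uint16_t c) { return 0x0001000100010001ull * c; }

    /* fabs/negate four packed float16 values in one integer operation */
    static uint64_t fabs_h4(uint64_t packed) { return packed & dup16(0x7fff); }
    static uint64_t fneg_h4(uint64_t packed) { return packed ^ dup16(0x8000); }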
|
translate-sve.c
    6107  mask = dup_const(vece, mask);  in gen_ushll_i64()
    6515  uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));  in gen_shrnb_i64()
    6568  uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));  in TRANS_FEAT()
|
translate-neon.c
    1321  widen_mask = dup_const(a->size + 1, widen_mask);  in DO_2SN_64()
|
translate.c
    149  return dup_const(MO_32, imm);  in asimd_imm_const()
|
/openbmc/qemu/include/tcg/ |
tcg.h
    1035  uint64_t dup_const(unsigned vece, uint64_t c);
    1037  #define dup_const(VECE, C) \
    1044  : dup_const(VECE, C))
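tcg.h pairs the out-of-line function at line 1035 with a same-named macro starting at line 1037 whose body the listing elides; line 1044 is its fallback branch. Below is a hedged sketch of the typical shape of such a wrapper: fold to an integer constant when the element size is known at compile time, otherwise call the function. It is an illustration of the idea, not a copy of tcg.h, and it assumes the MO_* values and dup_const() prototype shown elsewhere in this listing.

    /* DUP_CONST_SKETCH is a hypothetical name; the folding arms mirror
     * the earlier dup_const_sketch() example. */
    #define DUP_CONST_SKETCH(VECE, C)                                    \
        (__builtin_constant_p(VECE)                                      \
         ? ((VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)       \
            : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)    \
            : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)    \
            : (uint64_t)(C))                                             \
         : dup_const(VECE, C))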
|
tcg-op.h
    282  #define dup_const_tl dup_const
    407  : (target_long)dup_const(VECE, C))
|
/openbmc/qemu/target/s390x/tcg/ |
vec_string_helper.c
    75  return dup_const(es, get_single_element_lsbs_mask(es));  in get_element_lsbs_mask()
|
translate_vx.c.inc
    1301  TCGv_i64 msb_mask = tcg_constant_i64(dup_const(es, 1ull << msb_bit_nr));
|
/openbmc/qemu/target/mips/tcg/ |
msa_translate.c
    201  uint64_t eval_zero_or_big = dup_const(df, 1);  in gen_check_zero_element()
|
nanomips_translate.c.inc
    3536  tcg_gen_movi_tl(cpu_gpr[rt], dup_const(MO_16, imm));
|
/openbmc/qemu/target/ppc/translate/ |
vmx-impl.c.inc
    1986  mask = dup_const(vece, 1ULL << (elem_width - 1));
    2103  c = dup_const(vece, 1);
    2139  const uint64_t mask = dup_const(MO_8, 1);
    2168  mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
|
vsx-impl.c.inc
    1758  mask = tcg_constant_i64(dup_const(MO_8, 1));
|
/openbmc/qemu/target/hppa/ |
translate.c
    1447  test_cb = dup_const(MO_8, 0x88);  in do_unit_addsub()
    1451  test_cb = dup_const(MO_32, INT32_MIN);  in do_unit_addsub()
    1457  test_cb = dup_const(MO_8, INT8_MIN);  in do_unit_addsub()
    1460  test_cb = dup_const(MO_16, INT16_MIN);  in do_unit_addsub()
|
/openbmc/qemu/target/loongarch/tcg/ |
vec_helper.c
    3118  uint64_t ones = dup_const(esz, 1);  in do_match2()
    3122  cmp1 = dup_const(esz, n);  in do_match2()
|
/openbmc/qemu/target/loongarch/tcg/insn_trans/ |
trans_vec.c.inc
    3632  tcg_gen_movi_i64(t, dup_const(MO_8, imm));
|