/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/*
 * DEF(name, oargs, iargs, cargs, flags)
 */

/* predefined ops */
DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT)
DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)

/* variable number of parameters */
DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)

DEF(br, 0, 0, 1, TCG_OPF_BB_END)

#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
#if TCG_TARGET_REG_BITS == 32
# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT
#else
# define IMPL64 TCG_OPF_64BIT
#endif
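
/*
 * Note on the helpers above (an explanatory aside, not part of the table):
 * IMPL(X) contributes TCG_OPF_NOT_PRESENT when X is a compile-time constant
 * that evaluates to zero, i.e. the backend has stated that it does not
 * implement the operation; when X is 1, or is a runtime expression such as a
 * cpuinfo test, the contribution is 0 and the opcode remains available.
 * IMPL64 likewise marks every plain 64-bit opcode as not present on 32-bit
 * backends, where i64 values are handled as register pairs.  For example, on
 * a backend that defines TCG_TARGET_HAS_div_i32 as 0,
 *
 *     DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
 *
 * is registered with flags TCG_OPF_NOT_PRESENT, and the common code is
 * expected to expand the division some other way (e.g. via div2_i32 or a
 * helper call).
 */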

DEF(mb, 0, 0, 1, 0)

DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
DEF(setcond_i32, 1, 2, 1, 0)
DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32))
/* load/store */
DEF(ld8u_i32, 1, 1, 1, 0)
DEF(ld8s_i32, 1, 1, 1, 0)
DEF(ld16u_i32, 1, 1, 1, 0)
DEF(ld16s_i32, 1, 1, 1, 0)
DEF(ld_i32, 1, 1, 1, 0)
DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
DEF(add_i32, 1, 2, 0, 0)
DEF(sub_i32, 1, 2, 0, 0)
DEF(mul_i32, 1, 2, 0, 0)
DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
DEF(and_i32, 1, 2, 0, 0)
DEF(or_i32, 1, 2, 0, 0)
DEF(xor_i32, 1, 2, 0, 0)
/* shifts/rotates */
DEF(shl_i32, 1, 2, 0, 0)
DEF(shr_i32, 1, 2, 0, 0)
DEF(sar_i32, 1, 2, 0, 0)
DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))

DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)

DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
DEF(brcond2_i32, 0, 4, 2,
    TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL(TCG_TARGET_REG_BITS == 32))
DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))

DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32))
DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32))
DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32))
DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32))
DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32))
DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))

DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
DEF(setcond_i64, 1, 2, 1, IMPL64)
DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64))
/* load/store */
DEF(ld8u_i64, 1, 1, 1, IMPL64)
DEF(ld8s_i64, 1, 1, 1, IMPL64)
DEF(ld16u_i64, 1, 1, 1, IMPL64)
DEF(ld16s_i64, 1, 1, 1, IMPL64)
DEF(ld32u_i64, 1, 1, 1, IMPL64)
DEF(ld32s_i64, 1, 1, 1, IMPL64)
DEF(ld_i64, 1, 1, 1, IMPL64)
DEF(st8_i64, 0, 2, 1, IMPL64)
DEF(st16_i64, 0, 2, 1, IMPL64)
DEF(st32_i64, 0, 2, 1, IMPL64)
DEF(st_i64, 0, 2, 1, IMPL64)
/* arith */
DEF(add_i64, 1, 2, 0, IMPL64)
DEF(sub_i64, 1, 2, 0, IMPL64)
DEF(mul_i64, 1, 2, 0, IMPL64)
DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
DEF(and_i64, 1, 2, 0, IMPL64)
DEF(or_i64, 1, 2, 0, IMPL64)
DEF(xor_i64, 1, 2, 0, IMPL64)
/* shifts/rotates */
DEF(shl_i64, 1, 2, 0, IMPL64)
DEF(shr_i64, 1, 2, 0, IMPL64)
DEF(sar_i64, 1, 2, 0, IMPL64)
DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))

/* size changing ops */
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
DEF(extu_i32_i64, 1, 1, 0, IMPL64)
DEF(extrl_i64_i32, 1, 1, 0,
    IMPL(TCG_TARGET_HAS_extrl_i64_i32)
    | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
DEF(extrh_i64_i32, 1, 1, 0,
    IMPL(TCG_TARGET_HAS_extrh_i64_i32)
    | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))

DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64))
DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64))
DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64))
DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64))
DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64))
DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64))

DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64))
DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64))
DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))

#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)

/* There are tcg_ctx->insn_start_words here, not just one. */
DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)

DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)

DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT)
DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT)

/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
DEF(qemu_ld_a32_i32, 1, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)

DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
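
/*
 * Aside (illustrative): the operand counts of the qemu_ld/st opcodes above
 * follow from DATA64_ARGS.  On a 32-bit host DATA64_ARGS is 2, so e.g.
 * qemu_ld_a64_i64 has 2 outputs (the 64-bit data as a register pair),
 * 2 inputs (the 64-bit guest address) and 1 constant argument (the memory
 * operation index); on a 64-bit host each of those collapses to a single
 * register.
 */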

/* Only used by i386 to cope with stupid register constraints. */
DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
    IMPL(TCG_TARGET_HAS_qemu_st8_i32))
DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
    IMPL(TCG_TARGET_HAS_qemu_st8_i32))

/* Only for 64-bit hosts at the moment. */
DEF(qemu_ld_a32_i128, 2, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
DEF(qemu_ld_a64_i128, 2, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
DEF(qemu_st_a32_i128, 0, 3, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
DEF(qemu_st_a64_i128, 0, 3, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))

/* Host vector support. */

#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)

DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)

DEF(dup_vec, 1, 1, 0, IMPLVEC)
DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))

DEF(ld_vec, 1, 1, 1, IMPLVEC)
DEF(st_vec, 0, 2, 1, IMPLVEC)
DEF(dupm_vec, 1, 1, 1, IMPLVEC)

DEF(add_vec, 1, 2, 0, IMPLVEC)
DEF(sub_vec, 1, 2, 0, IMPLVEC)
DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))
DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec))
DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))

DEF(and_vec, 1, 2, 0, IMPLVEC)
DEF(or_vec, 1, 2, 0, IMPLVEC)
DEF(xor_vec, 1, 2, 0, IMPLVEC)
DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))

DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec))

DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(rotls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rots_vec))

DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))

DEF(cmp_vec, 1, 2, 1, IMPLVEC)

DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec))
DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec))

DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)

#if TCG_TARGET_MAYBE_vec
#include "tcg-target.opc.h"
#endif

#ifdef TCG_TARGET_INTERPRETER
/* These opcodes are only for use between the tci generator and interpreter. */
DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
#endif

#undef DATA64_ARGS
#undef IMPL
#undef IMPL64
#undef IMPLVEC
#undef DEF
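
/*
 * Usage sketch (not part of this header): every consumer supplies its own
 * DEF() before including this file, and the entries above expand into
 * whatever table that consumer is building.  Roughly, QEMU's opcode enum is
 * produced along these lines (see tcg/tcg.h for the real thing):
 *
 *     typedef enum TCGOpcode {
 *     #define DEF(name, oargs, iargs, cargs, flags)  INDEX_op_ ## name,
 *     #include "tcg/tcg-opc.h"
 *         NB_OPS,
 *     } TCGOpcode;
 *
 * A second inclusion with a different DEF() builds the per-opcode property
 * table (operand counts and flags) used by the optimizer and the register
 * allocator.
 */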