Lines Matching +full:ts +full:- +full:inv (QEMU tcg/optimize.c)

28 #include "qemu/interval-tree.h"
29 #include "tcg/tcg-op-common.h"
30 #include "tcg-internal.h"
31 #include "tcg-has.h"
45 TCGTemp *ts; member
71 static inline TempOptInfo *ts_info(TCGTemp *ts) in ts_info() argument
73 return ts->state_ptr; in ts_info()
83 return ti->is_const; in ti_is_const()
88 return ti->val; in ti_const_val()
96 static inline bool ts_is_const(TCGTemp *ts) in ts_is_const() argument
98 return ti_is_const(ts_info(ts)); in ts_is_const()
101 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val) in ts_is_const_val() argument
103 return ti_is_const_val(ts_info(ts), val); in ts_is_const_val()
116 static inline bool ts_is_copy(TCGTemp *ts) in ts_is_copy() argument
118 return ts_info(ts)->next_copy != ts; in ts_is_copy()
123 return a->kind < b->kind ? b : a; in cmp_better_copy()
127 static void init_ts_info(OptContext *ctx, TCGTemp *ts) in init_ts_info() argument
129 size_t idx = temp_idx(ts); in init_ts_info()
132 if (test_bit(idx, ctx->temps_used.l)) { in init_ts_info()
135 set_bit(idx, ctx->temps_used.l); in init_ts_info()
137 ti = ts->state_ptr; in init_ts_info()
140 ts->state_ptr = ti; in init_ts_info()
143 ti->next_copy = ts; in init_ts_info()
144 ti->prev_copy = ts; in init_ts_info()
145 QSIMPLEQ_INIT(&ti->mem_copy); in init_ts_info()
146 if (ts->kind == TEMP_CONST) { in init_ts_info()
147 ti->is_const = true; in init_ts_info()
148 ti->val = ts->val; in init_ts_info()
149 ti->z_mask = ts->val; in init_ts_info()
150 ti->s_mask = INT64_MIN >> clrsb64(ts->val); in init_ts_info()
152 ti->is_const = false; in init_ts_info()
153 ti->z_mask = -1; in init_ts_info()
154 ti->s_mask = 0; in init_ts_info()
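
Note: the init_ts_info() lines above seed the per-temp analysis state: every temp starts as a one-element copy ring pointing at itself, and a TEMP_CONST additionally gets known-bit masks derived from its value (z_mask has a 1 for every bit that may be 1; s_mask is a left-aligned run of bits known to equal the sign bit). A minimal sketch of that derivation, assuming clrsb64() counts redundant leading sign bits; the names below are illustrative stand-ins, not QEMU's headers:

    #include <stdint.h>

    /* Portable stand-in for clrsb64(): bits below the msb that equal it. */
    static int clrsb64_sketch(int64_t v)
    {
        uint64_t x = (uint64_t)v;
        int n = 0;
        while (n < 63 && ((x >> 62) & 1) == (x >> 63)) {
            x <<= 1;
            n++;
        }
        return n;
    }

    /* Masks for a constant, mirroring the TEMP_CONST branch above.
       Assumes arithmetic right shift of negatives, as QEMU does. */
    static void const_masks_sketch(int64_t val, uint64_t *z_mask, int64_t *s_mask)
    {
        *z_mask = (uint64_t)val;
        *s_mask = INT64_MIN >> clrsb64_sketch(val);
    }
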
160 IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l); in mem_copy_first()
166 IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l); in mem_copy_next()
172 TCGTemp *ts = mc->ts; in remove_mem_copy() local
173 TempOptInfo *ti = ts_info(ts); in remove_mem_copy()
175 interval_tree_remove(&mc->itree, &ctx->mem_copy); in remove_mem_copy()
176 QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next); in remove_mem_copy()
177 QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next); in remove_mem_copy()
193 remove_mem_copy_in(ctx, 0, -1); in remove_mem_copy_all()
194 tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy)); in remove_mem_copy_all()
197 static TCGTemp *find_better_copy(TCGTemp *ts) in find_better_copy() argument
202 if (temp_readonly(ts)) { in find_better_copy()
203 return ts; in find_better_copy()
206 ret = ts; in find_better_copy()
207 for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) { in find_better_copy()
219 QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) { in move_mem_copies()
220 tcg_debug_assert(mc->ts == src_ts); in move_mem_copies()
221 mc->ts = dst_ts; in move_mem_copies()
223 QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy); in move_mem_copies()
227 static void reset_ts(OptContext *ctx, TCGTemp *ts) in reset_ts() argument
229 TempOptInfo *ti = ts_info(ts); in reset_ts()
230 TCGTemp *pts = ti->prev_copy; in reset_ts()
231 TCGTemp *nts = ti->next_copy; in reset_ts()
235 ni->prev_copy = ti->prev_copy; in reset_ts()
236 pi->next_copy = ti->next_copy; in reset_ts()
237 ti->next_copy = ts; in reset_ts()
238 ti->prev_copy = ts; in reset_ts()
239 ti->is_const = false; in reset_ts()
240 ti->z_mask = -1; in reset_ts()
241 ti->s_mask = 0; in reset_ts()
243 if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) { in reset_ts()
244 if (ts == nts) { in reset_ts()
247 QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) { in reset_ts()
248 interval_tree_remove(&mc->itree, &ctx->mem_copy); in reset_ts()
250 QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy); in reset_ts()
252 move_mem_copies(find_better_copy(nts), ts); in reset_ts()
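
Note: reset_ts() above unlinks a temp from its copy ring and, when it was the last local copy, either frees its tracked memory copies or migrates them to a surviving copy via move_mem_copies(). The ring surgery itself is plain circular doubly-linked-list removal; a self-contained sketch with illustrative names:

    struct ring { struct ring *next, *prev; };

    /* Remove r from its ring; a temp with no copies points at itself,
       matching the next_copy/prev_copy resets above. */
    static void ring_remove(struct ring *r)
    {
        r->next->prev = r->prev;
        r->prev->next = r->next;
        r->next = r->prev = r;
    }
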
263 TCGTemp *ts, intptr_t start, intptr_t last) in record_mem_copy() argument
268 mc = QSIMPLEQ_FIRST(&ctx->mem_free); in record_mem_copy()
270 QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next); in record_mem_copy()
276 mc->itree.start = start; in record_mem_copy()
277 mc->itree.last = last; in record_mem_copy()
278 mc->type = type; in record_mem_copy()
279 interval_tree_insert(&mc->itree, &ctx->mem_copy); in record_mem_copy()
281 ts = find_better_copy(ts); in record_mem_copy()
282 ti = ts_info(ts); in record_mem_copy()
283 mc->ts = ts; in record_mem_copy()
284 QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next); in record_mem_copy()
299 for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) { in ts_are_copies()
318 if (mc->itree.start == s && mc->type == type) { in find_mem_copy_for()
319 return find_better_copy(mc->ts); in find_mem_copy_for()
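
Note: record_mem_copy() and find_mem_copy_for() above track which env slots currently mirror a temp: each live copy is an interval [start, last] in ctx->mem_copy, and a later load may reuse the temp only on an exact start and type match. A rough sketch of that lookup contract, with a flat array standing in for the interval tree (hypothetical helper, not QEMU's API):

    #include <stddef.h>
    #include <stdint.h>

    struct mem_copy_sketch { intptr_t start, last; int type; void *ts; };

    /* Return the temp recorded for an exact (offset, type) match, or NULL. */
    static void *find_mem_copy_sketch(struct mem_copy_sketch *v, size_t n,
                                      intptr_t s, int type)
    {
        for (size_t i = 0; i < n; i++) {
            if (v[i].start == s && v[i].type == type) {
                return v[i].ts;
            }
        }
        return NULL;
    }
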
327 TCGType type = ctx->type; in arg_new_constant()
328 TCGTemp *ts; in arg_new_constant() local
334 ts = tcg_constant_internal(type, val); in arg_new_constant()
335 init_ts_info(ctx, ts); in arg_new_constant()
337 return temp_arg(ts); in arg_new_constant()
342 TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB); in arg_new_temp() local
343 init_ts_info(ctx, ts); in arg_new_temp()
344 return temp_arg(ts); in arg_new_temp()
356 tcg_op_remove(ctx->tcg, op); in tcg_opt_gen_mov()
364 switch (ctx->type) { in tcg_opt_gen_mov()
380 op->opc = new_op; in tcg_opt_gen_mov()
381 op->args[0] = dst; in tcg_opt_gen_mov()
382 op->args[1] = src; in tcg_opt_gen_mov()
384 di->z_mask = si->z_mask; in tcg_opt_gen_mov()
385 di->s_mask = si->s_mask; in tcg_opt_gen_mov()
387 if (src_ts->type == dst_ts->type) { in tcg_opt_gen_mov()
388 TempOptInfo *ni = ts_info(si->next_copy); in tcg_opt_gen_mov()
390 di->next_copy = si->next_copy; in tcg_opt_gen_mov()
391 di->prev_copy = src_ts; in tcg_opt_gen_mov()
392 ni->prev_copy = dst_ts; in tcg_opt_gen_mov()
393 si->next_copy = dst_ts; in tcg_opt_gen_mov()
394 di->is_const = si->is_const; in tcg_opt_gen_mov()
395 di->val = si->val; in tcg_opt_gen_mov()
397 if (!QSIMPLEQ_EMPTY(&si->mem_copy) in tcg_opt_gen_mov()
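
Note: tcg_opt_gen_mov() above either removes a mov to itself or rewrites the op, copies the masks, and, when the types match, splices dst into src's copy ring and inherits is_const/val. A sketch of the splice matching the next_copy/prev_copy updates listed, reusing struct ring from the sketch further up:

    /* Insert dst into the ring immediately after src. */
    static void ring_insert_after(struct ring *src, struct ring *dst)
    {
        dst->next = src->next;
        dst->prev = src;
        src->next->prev = dst;
        src->next = dst;
    }
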
421 return x - y; in do_constant_folding_2()
469 return -x; in do_constant_folding_2()
670 return -1; in do_constant_folding_cond_eq()
679 * Return -1 if the condition can't be simplified,
686 uint64_t xv = arg_info(x)->val; in do_constant_folding_cond()
687 uint64_t yv = arg_info(y)->val; in do_constant_folding_cond()
696 return -1; in do_constant_folding_cond()
709 return -1; in do_constant_folding_cond()
712 return -1; in do_constant_folding_cond()
733 sum -= arg_is_const(a2); in swap_commutative()
736 op a, a, b, which is better handled on non-RISC hosts. */ in swap_commutative()
750 sum -= arg_is_const(p2[0]); in swap_commutative2()
751 sum -= arg_is_const(p2[1]); in swap_commutative2()
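
Note: swap_commutative() above scores the operand pair (sum gains 1 for a constant first operand, loses 1 for a constant second) and swaps when positive, so constants land in the second slot and the "op a, a, b" form is preferred; swap_commutative2() applies the same rule to double-word pairs. A minimal single-pair sketch; the real routine also prefers the operand that matches the destination:

    #include <stdbool.h>
    #include <stdint.h>

    /* Swap so a constant ends up as the second operand. */
    static bool swap_commutative_sketch(uint64_t *a1, uint64_t *a2,
                                        bool a1_const, bool a2_const)
    {
        if (a1_const && !a2_const) {
            uint64_t t = *a1;
            *a1 = *a2;
            *a2 = t;
            return true;   /* caller may also need to swap a condition */
        }
        return false;
    }
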
762 * Return -1 if the condition can't be simplified,
779 r = do_constant_folding_cond(ctx->type, *p1, *p2, cond); in do_constant_folding_cond1()
784 return -1; in do_constant_folding_cond1()
790 * TSTNE x,x -> NE x,0 in do_constant_folding_cond1()
791 * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x in do_constant_folding_cond1()
794 (arg_is_const(*p2) && (i1->z_mask & ~arg_info(*p2)->val) == 0)) { in do_constant_folding_cond1()
797 return -1; in do_constant_folding_cond1()
800 /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */ in do_constant_folding_cond1()
801 if (arg_is_const(*p2) && (arg_info(*p2)->val & ~i1->s_mask) == 0) { in do_constant_folding_cond1()
804 return -1; in do_constant_folding_cond1()
809 TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32 in do_constant_folding_cond1()
811 TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3); in do_constant_folding_cond1()
814 op2->args[0] = tmp; in do_constant_folding_cond1()
815 op2->args[1] = *p1; in do_constant_folding_cond1()
816 op2->args[2] = *p2; in do_constant_folding_cond1()
822 return -1; in do_constant_folding_cond1()
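
Note: the TSTNE rewrites in do_constant_folding_cond1() above are bit algebra: TSTNE a,b means (a & b) != 0, so TSTNE x,x is just x != 0; a constant mask covering every possibly-set bit of x makes the AND a no-op; and a mask containing only sign-bit copies reduces to a sign test. When no direct form exists, an explicit AND is inserted and the condition rewritten against zero. The two identities can be checked directly:

    #include <assert.h>
    #include <stdint.h>

    static void tstne_identities(uint64_t x)
    {
        /* TSTNE x,x  ->  NE x,0 */
        assert(((x & x) != 0) == (x != 0));
        /* TSTNE x,sign-bit  ->  LT x,0 */
        assert(((x & (1ull << 63)) != 0) == ((int64_t)x < 0));
    }
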
844 tcg_target_ulong blv = arg_info(bl)->val; in do_constant_folding_cond2()
845 tcg_target_ulong bhv = arg_info(bh)->val; in do_constant_folding_cond2()
849 tcg_target_ulong alv = arg_info(al)->val; in do_constant_folding_cond2()
850 tcg_target_ulong ahv = arg_info(ah)->val; in do_constant_folding_cond2()
872 /* TSTNE x,-1 -> NE x,0 */ in do_constant_folding_cond2()
873 if (b == -1 && is_tst_cond(c)) { in do_constant_folding_cond2()
876 return -1; in do_constant_folding_cond2()
879 /* TSTNE x,sign -> LT x,0 */ in do_constant_folding_cond2()
884 return -1; in do_constant_folding_cond2()
894 /* TSTNE x,x -> NE x,0 */ in do_constant_folding_cond2()
898 return -1; in do_constant_folding_cond2()
904 TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3); in do_constant_folding_cond2()
905 TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3); in do_constant_folding_cond2()
909 op1->args[0] = t1; in do_constant_folding_cond2()
910 op1->args[1] = al; in do_constant_folding_cond2()
911 op1->args[2] = bl; in do_constant_folding_cond2()
912 op2->args[0] = t2; in do_constant_folding_cond2()
913 op2->args[1] = ah; in do_constant_folding_cond2()
914 op2->args[2] = bh; in do_constant_folding_cond2()
921 return -1; in do_constant_folding_cond2()
927 TCGTemp *ts = arg_temp(op->args[i]); in init_arguments() local
928 init_ts_info(ctx, ts); in init_arguments()
936 TCGTemp *ts = arg_temp(op->args[i]); in copy_propagate() local
937 if (ts_is_copy(ts)) { in copy_propagate()
938 op->args[i] = temp_arg(find_better_copy(ts)); in copy_propagate()
946 ctx->prev_mb = NULL; in finish_bb()
953 memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); in finish_ebb()
959 const TCGOpDef *def = &tcg_op_defs[op->opc]; in finish_folding()
962 nb_oargs = def->nb_oargs; in finish_folding()
964 TCGTemp *ts = arg_temp(op->args[i]); in finish_folding() local
965 reset_ts(ctx, ts); in finish_folding()
983 if (arg_is_const(op->args[1])) { in fold_const1()
986 t = arg_info(op->args[1])->val; in fold_const1()
987 t = do_constant_folding(op->opc, ctx->type, t, 0); in fold_const1()
988 return tcg_opt_gen_movi(ctx, op, op->args[0], t); in fold_const1()
995 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { in fold_const2()
996 uint64_t t1 = arg_info(op->args[1])->val; in fold_const2()
997 uint64_t t2 = arg_info(op->args[2])->val; in fold_const2()
999 t1 = do_constant_folding(op->opc, ctx->type, t1, t2); in fold_const2()
1000 return tcg_opt_gen_movi(ctx, op, op->args[0], t1); in fold_const2()
1007 swap_commutative(op->args[0], &op->args[1], &op->args[2]); in fold_commutative()
1013 swap_commutative(op->args[0], &op->args[1], &op->args[2]); in fold_const2_commutative()
1026 const TCGOpDef *def = &tcg_op_defs[op->opc]; in fold_masks_zs()
1027 TCGTemp *ts; in fold_masks_zs() local
1031 /* Only single-output opcodes are supported here. */ in fold_masks_zs()
1032 tcg_debug_assert(def->nb_oargs == 1); in fold_masks_zs()
1035 * 32-bit ops generate 32-bit results, which for the purpose of in fold_masks_zs()
1036 * simplifying tcg are sign-extended. Certainly that's how we in fold_masks_zs()
1038 * be reset properly for a 64-bit value when encountering the in fold_masks_zs()
1041 if (ctx->type == TCG_TYPE_I32) { in fold_masks_zs()
1047 return tcg_opt_gen_movi(ctx, op, op->args[0], 0); in fold_masks_zs()
1050 ts = arg_temp(op->args[0]); in fold_masks_zs()
1051 reset_ts(ctx, ts); in fold_masks_zs()
1053 ti = ts_info(ts); in fold_masks_zs()
1054 ti->z_mask = z_mask; in fold_masks_zs()
1059 rep = MAX(rep - 1, 0); in fold_masks_zs()
1060 ti->s_mask = INT64_MIN >> rep; in fold_masks_zs()
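
Note: fold_masks_zs() above commits the per-op mask results: 32-bit ops truncate both masks, z_mask == 0 proves the result is the constant 0 (hence the movi), and s_mask is re-canonicalized into a left-aligned run. The soundness rule for z_mask (a 1 for every bit that may be 1) can be spot-checked for the AND/OR/XOR combinations used by the fold_* routines below:

    #include <assert.h>
    #include <stdint.h>

    /* A value a is consistent with mask za when (a & ~za) == 0. */
    static void zmask_rules(uint64_t a, uint64_t za, uint64_t b, uint64_t zb)
    {
        assert((a & ~za) == 0 && (b & ~zb) == 0);   /* preconditions */
        assert(((a & b) & ~(za & zb)) == 0);        /* and: z1 & z2 */
        assert(((a | b) & ~(za | zb)) == 0);        /* or:  z1 | z2 */
        assert(((a ^ b) & ~(za | zb)) == 0);        /* xor: z1 | z2 */
    }
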
1072 return fold_masks_zs(ctx, op, -1, s_mask); in fold_masks_s()
1082 if (ctx->type == TCG_TYPE_I32) { in fold_affected_mask()
1086 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); in fold_affected_mask()
1102 switch (ctx->type) { in fold_to_not()
1121 op->opc = not_op; in fold_to_not()
1122 op->args[1] = op->args[idx]; in fold_to_not()
1131 if (arg_is_const_val(op->args[1], i)) { in fold_ix_to_i()
1132 return tcg_opt_gen_movi(ctx, op, op->args[0], i); in fold_ix_to_i()
1140 if (arg_is_const_val(op->args[1], i)) { in fold_ix_to_not()
1149 if (arg_is_const_val(op->args[2], i)) { in fold_xi_to_i()
1150 return tcg_opt_gen_movi(ctx, op, op->args[0], i); in fold_xi_to_i()
1158 if (arg_is_const_val(op->args[2], i)) { in fold_xi_to_x()
1159 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); in fold_xi_to_x()
1167 if (arg_is_const_val(op->args[2], i)) { in fold_xi_to_not()
1176 if (args_are_copies(op->args[1], op->args[2])) { in fold_xx_to_i()
1177 return tcg_opt_gen_movi(ctx, op, op->args[0], i); in fold_xx_to_i()
1185 if (args_are_copies(op->args[1], op->args[2])) { in fold_xx_to_x()
1186 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); in fold_xx_to_x()
1225 bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]); in fold_addsub2()
1226 bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]); in fold_addsub2()
1229 uint64_t al = arg_info(op->args[2])->val; in fold_addsub2()
1230 uint64_t ah = arg_info(op->args[3])->val; in fold_addsub2()
1231 uint64_t bl = arg_info(op->args[4])->val; in fold_addsub2()
1232 uint64_t bh = arg_info(op->args[5])->val; in fold_addsub2()
1236 if (ctx->type == TCG_TYPE_I32) { in fold_addsub2()
1243 a -= b; in fold_addsub2()
1262 rl = op->args[0]; in fold_addsub2()
1263 rh = op->args[1]; in fold_addsub2()
1266 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2); in fold_addsub2()
1273 /* Fold sub2 r,x,i to add2 r,x,-i */ in fold_addsub2()
1275 uint64_t bl = arg_info(op->args[4])->val; in fold_addsub2()
1276 uint64_t bh = arg_info(op->args[5])->val; in fold_addsub2()
1279 bl = -bl; in fold_addsub2()
1282 op->opc = (ctx->type == TCG_TYPE_I32 in fold_addsub2()
1284 op->args[4] = arg_new_constant(ctx, bl); in fold_addsub2()
1285 op->args[5] = arg_new_constant(ctx, bh); in fold_addsub2()
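
Note: fold_addsub2() above folds a double-word add/sub outright when all four input halves are constant, and otherwise rewrites sub2 r,x,const as add2 r,x,-const; the listed "bl = -bl" is the low half of a two's-complement double-word negate whose high-half line falls outside the matches. The full negate, as a sketch:

    #include <stdint.h>

    /* Negate hi:lo as one 128-bit two's-complement value. */
    static void neg_double_sketch(uint64_t *lo, uint64_t *hi)
    {
        *lo = -*lo;
        *hi = ~*hi + (*lo == 0);   /* carry into the high half iff lo was 0 */
    }
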
1293 swap_commutative(op->args[0], &op->args[2], &op->args[4]); in fold_add2()
1294 swap_commutative(op->args[1], &op->args[3], &op->args[5]); in fold_add2()
1306 fold_xi_to_x(ctx, op, -1) || in fold_and()
1311 t1 = arg_info(op->args[1]); in fold_and()
1312 t2 = arg_info(op->args[2]); in fold_and()
1313 z1 = t1->z_mask; in fold_and()
1314 z2 = t2->z_mask; in fold_and()
1317 * Known-zeros does not imply known-ones. Therefore unless in fold_and()
1330 s_mask = t1->s_mask & t2->s_mask; in fold_and()
1343 fold_ix_to_not(ctx, op, -1)) { in fold_andc()
1347 t1 = arg_info(op->args[1]); in fold_andc()
1348 t2 = arg_info(op->args[2]); in fold_andc()
1349 z_mask = t1->z_mask; in fold_andc()
1352 * Known-zeros does not imply known-ones. Therefore unless in fold_andc()
1363 s_mask = t1->s_mask & t2->s_mask; in fold_andc()
1370 if (args_are_copies(op->args[2], op->args[3])) { in fold_bitsel_vec()
1371 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); in fold_bitsel_vec()
1374 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { in fold_bitsel_vec()
1375 uint64_t tv = arg_info(op->args[2])->val; in fold_bitsel_vec()
1376 uint64_t fv = arg_info(op->args[3])->val; in fold_bitsel_vec()
1378 if (tv == -1 && fv == 0) { in fold_bitsel_vec()
1379 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); in fold_bitsel_vec()
1381 if (tv == 0 && fv == -1) { in fold_bitsel_vec()
1383 op->opc = INDEX_op_not_vec; in fold_bitsel_vec()
1386 op->opc = INDEX_op_xor_vec; in fold_bitsel_vec()
1387 op->args[2] = arg_new_constant(ctx, -1); in fold_bitsel_vec()
1392 if (arg_is_const(op->args[2])) { in fold_bitsel_vec()
1393 uint64_t tv = arg_info(op->args[2])->val; in fold_bitsel_vec()
1394 if (tv == -1) { in fold_bitsel_vec()
1395 op->opc = INDEX_op_or_vec; in fold_bitsel_vec()
1396 op->args[2] = op->args[3]; in fold_bitsel_vec()
1400 op->opc = INDEX_op_andc_vec; in fold_bitsel_vec()
1401 op->args[2] = op->args[1]; in fold_bitsel_vec()
1402 op->args[1] = op->args[3]; in fold_bitsel_vec()
1406 if (arg_is_const(op->args[3])) { in fold_bitsel_vec()
1407 uint64_t fv = arg_info(op->args[3])->val; in fold_bitsel_vec()
1409 op->opc = INDEX_op_and_vec; in fold_bitsel_vec()
1412 if (fv == -1 && TCG_TARGET_HAS_orc_vec) { in fold_bitsel_vec()
1413 op->opc = INDEX_op_orc_vec; in fold_bitsel_vec()
1414 op->args[2] = op->args[1]; in fold_bitsel_vec()
1415 op->args[1] = op->args[3]; in fold_bitsel_vec()
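
Note: fold_bitsel_vec() above specializes bitsel(c, t, f) = (c & t) | (~c & f) whenever t or f is constant; each opcode rewrite in the listing follows from one identity:

    #include <stdint.h>

    static uint64_t bitsel(uint64_t c, uint64_t t, uint64_t f)
    {
        return (c & t) | (~c & f);
    }
    /* bitsel(c, -1,  0) == c        -> mov
       bitsel(c,  0, -1) == ~c       -> not (or xor with -1)
       bitsel(c, -1,  f) == c | f    -> or
       bitsel(c,  0,  f) == f & ~c   -> andc
       bitsel(c,  t,  0) == c & t    -> and
       bitsel(c,  t, -1) == t | ~c   -> orc */
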
1424 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0], in fold_brcond()
1425 &op->args[1], &op->args[2]); in fold_brcond()
1427 tcg_op_remove(ctx->tcg, op); in fold_brcond()
1431 op->opc = INDEX_op_br; in fold_brcond()
1432 op->args[0] = op->args[3]; in fold_brcond()
1444 int i, inv = 0; in fold_brcond2() local
1446 i = do_constant_folding_cond2(ctx, op, &op->args[0]); in fold_brcond2()
1447 cond = op->args[4]; in fold_brcond2()
1448 label = op->args[5]; in fold_brcond2()
1460 if (arg_is_const_val(op->args[2], 0) && in fold_brcond2()
1461 arg_is_const_val(op->args[3], 0)) { in fold_brcond2()
1467 inv = 1; in fold_brcond2()
1474 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0], in fold_brcond2()
1475 op->args[2], cond); in fold_brcond2()
1476 switch (i ^ inv) { in fold_brcond2()
1483 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], in fold_brcond2()
1484 op->args[3], cond); in fold_brcond2()
1485 switch (i ^ inv) { in fold_brcond2()
1495 if (arg_is_const_val(op->args[2], 0)) { in fold_brcond2()
1498 if (arg_is_const_val(op->args[3], 0)) { in fold_brcond2()
1507 op->opc = INDEX_op_brcond_i32; in fold_brcond2()
1508 op->args[1] = op->args[2]; in fold_brcond2()
1509 op->args[2] = cond; in fold_brcond2()
1510 op->args[3] = label; in fold_brcond2()
1514 op->opc = INDEX_op_brcond_i32; in fold_brcond2()
1515 op->args[0] = op->args[1]; in fold_brcond2()
1516 op->args[1] = op->args[3]; in fold_brcond2()
1517 op->args[2] = cond; in fold_brcond2()
1518 op->args[3] = label; in fold_brcond2()
1523 tcg_op_remove(ctx->tcg, op); in fold_brcond2()
1526 op->opc = INDEX_op_br; in fold_brcond2()
1527 op->args[0] = label; in fold_brcond2()
1539 TempOptInfo *t1 = arg_info(op->args[1]); in fold_bswap()
1542 return tcg_opt_gen_movi(ctx, op, op->args[0], in fold_bswap()
1543 do_constant_folding(op->opc, ctx->type, in fold_bswap()
1545 op->args[2])); in fold_bswap()
1548 z_mask = t1->z_mask; in fold_bswap()
1549 switch (op->opc) { in fold_bswap()
1569 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { in fold_bswap()
1577 /* The value and therefore s_mask is explicitly sign-extended. */ in fold_bswap()
1591 TCGContext *s = ctx->tcg; in fold_call()
1602 int nb_globals = s->nb_globals; in fold_call()
1605 if (test_bit(i, ctx->temps_used.l)) { in fold_call()
1606 reset_ts(ctx, &ctx->tcg->temps[i]); in fold_call()
1618 reset_temp(ctx, op->args[i]); in fold_call()
1622 ctx->prev_mb = NULL; in fold_call()
1629 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { in fold_cmp_vec()
1630 op->args[3] = tcg_swap_cond(op->args[3]); in fold_cmp_vec()
1638 if (args_are_copies(op->args[3], op->args[4])) { in fold_cmpsel_vec()
1639 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]); in fold_cmpsel_vec()
1643 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { in fold_cmpsel_vec()
1644 op->args[5] = tcg_swap_cond(op->args[5]); in fold_cmpsel_vec()
1650 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { in fold_cmpsel_vec()
1651 op->args[5] = tcg_invert_cond(op->args[5]); in fold_cmpsel_vec()
1659 TempOptInfo *t1 = arg_info(op->args[1]); in fold_count_zeros()
1660 TempOptInfo *t2 = arg_info(op->args[2]); in fold_count_zeros()
1666 t = do_constant_folding(op->opc, ctx->type, t, 0); in fold_count_zeros()
1667 return tcg_opt_gen_movi(ctx, op, op->args[0], t); in fold_count_zeros()
1669 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); in fold_count_zeros()
1672 switch (ctx->type) { in fold_count_zeros()
1683 z_mask |= t2->z_mask; in fold_count_zeros()
1684 s_mask &= t2->s_mask; in fold_count_zeros()
1697 switch (ctx->type) { in fold_ctpop()
1712 TempOptInfo *t1 = arg_info(op->args[1]); in fold_deposit()
1713 TempOptInfo *t2 = arg_info(op->args[2]); in fold_deposit()
1714 int ofs = op->args[3]; in fold_deposit()
1715 int len = op->args[4]; in fold_deposit()
1721 return tcg_opt_gen_movi(ctx, op, op->args[0], in fold_deposit()
1726 switch (ctx->type) { in fold_deposit()
1743 op->opc = and_opc; in fold_deposit()
1744 op->args[1] = op->args[2]; in fold_deposit()
1745 op->args[2] = arg_new_constant(ctx, mask); in fold_deposit()
1751 uint64_t mask = deposit64(-1, ofs, len, 0); in fold_deposit()
1753 op->opc = and_opc; in fold_deposit()
1754 op->args[2] = arg_new_constant(ctx, mask); in fold_deposit()
1760 s_mask = t2->s_mask << ofs; in fold_deposit()
1762 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len); in fold_deposit()
1765 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask); in fold_deposit()
1780 if (arg_is_const(op->args[1])) { in fold_dup()
1781 uint64_t t = arg_info(op->args[1])->val; in fold_dup()
1783 return tcg_opt_gen_movi(ctx, op, op->args[0], t); in fold_dup()
1790 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { in fold_dup2()
1791 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32, in fold_dup2()
1792 arg_info(op->args[2])->val); in fold_dup2()
1793 return tcg_opt_gen_movi(ctx, op, op->args[0], t); in fold_dup2()
1796 if (args_are_copies(op->args[1], op->args[2])) { in fold_dup2()
1797 op->opc = INDEX_op_dup_vec; in fold_dup2()
1808 fold_xi_to_x(ctx, op, -1) || in fold_eqv()
1813 s_mask = arg_info(op->args[1])->s_mask in fold_eqv()
1814 & arg_info(op->args[2])->s_mask; in fold_eqv()
1821 TempOptInfo *t1 = arg_info(op->args[1]); in fold_extract()
1822 int pos = op->args[2]; in fold_extract()
1823 int len = op->args[3]; in fold_extract()
1826 return tcg_opt_gen_movi(ctx, op, op->args[0], in fold_extract()
1830 z_mask_old = t1->z_mask; in fold_extract()
1841 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { in fold_extract2()
1842 uint64_t v1 = arg_info(op->args[1])->val; in fold_extract2()
1843 uint64_t v2 = arg_info(op->args[2])->val; in fold_extract2()
1844 int shr = op->args[3]; in fold_extract2()
1846 if (op->opc == INDEX_op_extract2_i64) { in fold_extract2()
1848 v2 <<= 64 - shr; in fold_extract2()
1851 v2 = (uint64_t)((int32_t)v2 << (32 - shr)); in fold_extract2()
1853 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2); in fold_extract2()
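
Note: fold_extract2() above folds a constant extract2, which reads a word at bit offset shr out of the concatenation v2:v1; the 32-bit variant keeps the upper part correct by shifting within 32 bits. A 64-bit sketch of the composition, assuming 0 < shr < 64 (a zero shift would simply yield v1):

    #include <stdint.h>

    /* Low 64 bits of (v2:v1) >> shr, for 0 < shr < 64. */
    static uint64_t extract2_sketch(uint64_t v1, uint64_t v2, int shr)
    {
        return (v1 >> shr) | (v2 << (64 - shr));
    }
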
1868 t1 = arg_info(op->args[1]); in fold_exts()
1869 z_mask = t1->z_mask; in fold_exts()
1870 s_mask = t1->s_mask; in fold_exts()
1873 switch (op->opc) { in fold_exts()
1909 z_mask_old = z_mask = arg_info(op->args[1])->z_mask; in fold_extu()
1911 switch (op->opc) { in fold_extu()
1943 if (ctx->prev_mb) { in fold_mb()
1956 ctx->prev_mb->args[0] |= op->args[0]; in fold_mb()
1957 tcg_op_remove(ctx->tcg, op); in fold_mb()
1959 ctx->prev_mb = op; in fold_mb()
1966 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); in fold_mov()
1976 if (args_are_copies(op->args[3], op->args[4])) { in fold_movcond()
1977 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]); in fold_movcond()
1984 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { in fold_movcond()
1985 op->args[5] = tcg_invert_cond(op->args[5]); in fold_movcond()
1988 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1], in fold_movcond()
1989 &op->args[2], &op->args[5]); in fold_movcond()
1991 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); in fold_movcond()
1994 tt = arg_info(op->args[3]); in fold_movcond()
1995 ft = arg_info(op->args[4]); in fold_movcond()
1996 z_mask = tt->z_mask | ft->z_mask; in fold_movcond()
1997 s_mask = tt->s_mask & ft->s_mask; in fold_movcond()
2003 TCGCond cond = op->args[5]; in fold_movcond()
2005 switch (ctx->type) { in fold_movcond()
2025 op->opc = opc; in fold_movcond()
2026 op->args[3] = cond; in fold_movcond()
2028 op->opc = opc; in fold_movcond()
2029 op->args[3] = tcg_invert_cond(cond); in fold_movcond()
2031 if (tv == -1 && fv == 0) { in fold_movcond()
2032 op->opc = negopc; in fold_movcond()
2033 op->args[3] = cond; in fold_movcond()
2034 } else if (fv == -1 && tv == 0) { in fold_movcond()
2035 op->opc = negopc; in fold_movcond()
2036 op->args[3] = tcg_invert_cond(cond); in fold_movcond()
2065 swap_commutative(op->args[0], &op->args[2], &op->args[3]); in fold_multiply2()
2067 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { in fold_multiply2()
2068 uint64_t a = arg_info(op->args[2])->val; in fold_multiply2()
2069 uint64_t b = arg_info(op->args[3])->val; in fold_multiply2()
2074 switch (op->opc) { in fold_multiply2()
2095 rl = op->args[0]; in fold_multiply2()
2096 rh = op->args[1]; in fold_multiply2()
2099 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2); in fold_multiply2()
2113 fold_xi_to_not(ctx, op, -1)) { in fold_nand()
2117 s_mask = arg_info(op->args[1])->s_mask in fold_nand()
2118 & arg_info(op->args[2])->s_mask; in fold_nand()
2125 uint64_t z_mask = arg_info(op->args[1])->z_mask; in fold_neg_no_const()
2126 z_mask = -(z_mask & -z_mask); in fold_neg_no_const()
2145 s_mask = arg_info(op->args[1])->s_mask in fold_nor()
2146 & arg_info(op->args[2])->s_mask; in fold_nor()
2155 return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask); in fold_not()
2169 t1 = arg_info(op->args[1]); in fold_or()
2170 t2 = arg_info(op->args[2]); in fold_or()
2171 z_mask = t1->z_mask | t2->z_mask; in fold_or()
2172 s_mask = t1->s_mask & t2->s_mask; in fold_or()
2181 fold_xx_to_i(ctx, op, -1) || in fold_orc()
2182 fold_xi_to_x(ctx, op, -1) || in fold_orc()
2187 s_mask = arg_info(op->args[1])->s_mask in fold_orc()
2188 & arg_info(op->args[2])->s_mask; in fold_orc()
2194 const TCGOpDef *def = &tcg_op_defs[op->opc]; in fold_qemu_ld_1reg()
2195 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; in fold_qemu_ld_1reg()
2198 uint64_t z_mask = -1, s_mask = 0; in fold_qemu_ld_1reg()
2202 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1)); in fold_qemu_ld_1reg()
2209 ctx->prev_mb = NULL; in fold_qemu_ld_1reg()
2217 ctx->prev_mb = NULL; in fold_qemu_ld_2reg()
2224 ctx->prev_mb = NULL; in fold_qemu_st()
2237 /* Return 1 if finished, -1 if simplified, 0 if unchanged. */
2243 if (!arg_is_const(op->args[2])) { in fold_setcond_zmask()
2247 a_zmask = arg_info(op->args[1])->z_mask; in fold_setcond_zmask()
2248 b_val = arg_info(op->args[2])->val; in fold_setcond_zmask()
2249 cond = op->args[3]; in fold_setcond_zmask()
2251 if (ctx->type == TCG_TYPE_I32) { in fold_setcond_zmask()
2260 bool inv = false; in fold_setcond_zmask() local
2266 inv = true; in fold_setcond_zmask()
2271 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv); in fold_setcond_zmask()
2282 bool inv = false; in fold_setcond_zmask() local
2286 inv = true; in fold_setcond_zmask()
2293 inv = true; in fold_setcond_zmask()
2305 if (!inv && !neg) { in fold_setcond_zmask()
2306 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); in fold_setcond_zmask()
2309 switch (ctx->type) { in fold_setcond_zmask()
2324 if (!inv) { in fold_setcond_zmask()
2325 op->opc = neg_opc; in fold_setcond_zmask()
2327 op->opc = add_opc; in fold_setcond_zmask()
2328 op->args[2] = arg_new_constant(ctx, -1); in fold_setcond_zmask()
2330 op->opc = xor_opc; in fold_setcond_zmask()
2331 op->args[2] = arg_new_constant(ctx, 1); in fold_setcond_zmask()
2333 return -1; in fold_setcond_zmask()
2343 TCGCond cond = op->args[3]; in fold_setcond_tst_pow2()
2348 bool inv; in fold_setcond_tst_pow2() local
2350 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) { in fold_setcond_tst_pow2()
2354 src2 = op->args[2]; in fold_setcond_tst_pow2()
2355 val = arg_info(src2)->val; in fold_setcond_tst_pow2()
2361 switch (ctx->type) { in fold_setcond_tst_pow2()
2392 ret = op->args[0]; in fold_setcond_tst_pow2()
2393 src1 = op->args[1]; in fold_setcond_tst_pow2()
2394 inv = cond == TCG_COND_TSTEQ; in fold_setcond_tst_pow2()
2396 if (sh && sext_opc && neg && !inv) { in fold_setcond_tst_pow2()
2397 op->opc = sext_opc; in fold_setcond_tst_pow2()
2398 op->args[1] = src1; in fold_setcond_tst_pow2()
2399 op->args[2] = sh; in fold_setcond_tst_pow2()
2400 op->args[3] = 1; in fold_setcond_tst_pow2()
2403 op->opc = uext_opc; in fold_setcond_tst_pow2()
2404 op->args[1] = src1; in fold_setcond_tst_pow2()
2405 op->args[2] = sh; in fold_setcond_tst_pow2()
2406 op->args[3] = 1; in fold_setcond_tst_pow2()
2409 op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3); in fold_setcond_tst_pow2()
2410 op2->args[0] = ret; in fold_setcond_tst_pow2()
2411 op2->args[1] = src1; in fold_setcond_tst_pow2()
2412 op2->args[2] = arg_new_constant(ctx, sh); in fold_setcond_tst_pow2()
2415 op->opc = and_opc; in fold_setcond_tst_pow2()
2416 op->args[1] = src1; in fold_setcond_tst_pow2()
2417 op->args[2] = arg_new_constant(ctx, 1); in fold_setcond_tst_pow2()
2420 if (neg && inv) { in fold_setcond_tst_pow2()
2421 op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3); in fold_setcond_tst_pow2()
2422 op2->args[0] = ret; in fold_setcond_tst_pow2()
2423 op2->args[1] = ret; in fold_setcond_tst_pow2()
2424 op2->args[2] = arg_new_constant(ctx, 1); in fold_setcond_tst_pow2()
2425 } else if (inv) { in fold_setcond_tst_pow2()
2426 op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3); in fold_setcond_tst_pow2()
2427 op2->args[0] = ret; in fold_setcond_tst_pow2()
2428 op2->args[1] = ret; in fold_setcond_tst_pow2()
2429 op2->args[2] = arg_new_constant(ctx, 1); in fold_setcond_tst_pow2()
2431 op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2); in fold_setcond_tst_pow2()
2432 op2->args[0] = ret; in fold_setcond_tst_pow2()
2433 op2->args[1] = ret; in fold_setcond_tst_pow2()
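
Note: fold_setcond_tst_pow2() above lowers setcond TSTNE x,2^sh to a single-bit extract: a signed extract when the negated form is wanted, an unsigned extract when available, else shift-right plus AND with 1, followed by a fixup (neg, add -1, or xor 1) to handle negation and inversion. The core reduction:

    #include <stdint.h>

    /* setcond TSTNE x, 1<<sh  ==  bit sh of x. */
    static uint64_t tst_pow2_sketch(uint64_t x, int sh)
    {
        return (x >> sh) & 1;   /* same as (x & (1ull << sh)) != 0 */
    }
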
2439 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1], in fold_setcond()
2440 &op->args[2], &op->args[3]); in fold_setcond()
2442 return tcg_opt_gen_movi(ctx, op, op->args[0], i); in fold_setcond()
2458 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1], in fold_negsetcond()
2459 &op->args[2], &op->args[3]); in fold_negsetcond()
2461 return tcg_opt_gen_movi(ctx, op, op->args[0], -i); in fold_negsetcond()
2472 /* Value is {0,-1} so all bits are repetitions of the sign. */ in fold_negsetcond()
2473 return fold_masks_s(ctx, op, -1); in fold_negsetcond()
2479 int i, inv = 0; in fold_setcond2() local
2481 i = do_constant_folding_cond2(ctx, op, &op->args[1]); in fold_setcond2()
2482 cond = op->args[5]; in fold_setcond2()
2494 if (arg_is_const_val(op->args[3], 0) && in fold_setcond2()
2495 arg_is_const_val(op->args[4], 0)) { in fold_setcond2()
2501 inv = 1; in fold_setcond2()
2508 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], in fold_setcond2()
2509 op->args[3], cond); in fold_setcond2()
2510 switch (i ^ inv) { in fold_setcond2()
2517 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2], in fold_setcond2()
2518 op->args[4], cond); in fold_setcond2()
2519 switch (i ^ inv) { in fold_setcond2()
2529 if (arg_is_const_val(op->args[3], 0)) { in fold_setcond2()
2532 if (arg_is_const_val(op->args[4], 0)) { in fold_setcond2()
2541 op->args[2] = op->args[3]; in fold_setcond2()
2542 op->args[3] = cond; in fold_setcond2()
2543 op->opc = INDEX_op_setcond_i32; in fold_setcond2()
2547 op->args[1] = op->args[2]; in fold_setcond2()
2548 op->args[2] = op->args[4]; in fold_setcond2()
2549 op->args[3] = cond; in fold_setcond2()
2550 op->opc = INDEX_op_setcond_i32; in fold_setcond2()
2557 return tcg_opt_gen_movi(ctx, op, op->args[0], i); in fold_setcond2()
2563 TempOptInfo *t1 = arg_info(op->args[1]); in fold_sextract()
2564 int pos = op->args[2]; in fold_sextract()
2565 int len = op->args[3]; in fold_sextract()
2568 return tcg_opt_gen_movi(ctx, op, op->args[0], in fold_sextract()
2572 s_mask_old = t1->s_mask; in fold_sextract()
2574 s_mask |= -1ull << (len - 1); in fold_sextract()
2580 z_mask = sextract64(t1->z_mask, pos, len); in fold_sextract()
2595 t1 = arg_info(op->args[1]); in fold_shift()
2596 t2 = arg_info(op->args[2]); in fold_shift()
2597 s_mask = t1->s_mask; in fold_shift()
2598 z_mask = t1->z_mask; in fold_shift()
2603 z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); in fold_shift()
2604 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh); in fold_shift()
2609 switch (op->opc) { in fold_shift()
2621 if (~z_mask & -s_mask) { in fold_shift()
2637 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) { in fold_sub_to_neg()
2641 switch (ctx->type) { in fold_sub_to_neg()
2655 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0); in fold_sub_to_neg()
2661 op->opc = neg_op; in fold_sub_to_neg()
2662 op->args[1] = op->args[2]; in fold_sub_to_neg()
2688 /* Fold sub r,x,i to add r,x,-i */ in fold_sub()
2689 if (arg_is_const(op->args[2])) { in fold_sub()
2690 uint64_t val = arg_info(op->args[2])->val; in fold_sub()
2692 op->opc = (ctx->type == TCG_TYPE_I32 in fold_sub()
2694 op->args[2] = arg_new_constant(ctx, -val); in fold_sub()
2706 uint64_t z_mask = -1, s_mask = 0; in fold_tcg_ld()
2709 switch (op->opc) { in fold_tcg_ld()
2740 if (op->args[1] != tcgv_ptr_arg(tcg_env)) { in fold_tcg_ld_memcopy()
2744 type = ctx->type; in fold_tcg_ld_memcopy()
2745 ofs = op->args[2]; in fold_tcg_ld_memcopy()
2746 dst = arg_temp(op->args[0]); in fold_tcg_ld_memcopy()
2748 if (src && src->base_type == type) { in fold_tcg_ld_memcopy()
2753 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1); in fold_tcg_ld_memcopy()
2759 intptr_t ofs = op->args[2]; in fold_tcg_st()
2762 if (op->args[1] != tcgv_ptr_arg(tcg_env)) { in fold_tcg_st()
2767 switch (op->opc) { in fold_tcg_st()
2782 lm1 = tcg_type_size(ctx->type) - 1; in fold_tcg_st()
2797 if (op->args[1] != tcgv_ptr_arg(tcg_env)) { in fold_tcg_st_memcopy()
2801 src = arg_temp(op->args[0]); in fold_tcg_st_memcopy()
2802 ofs = op->args[2]; in fold_tcg_st_memcopy()
2803 type = ctx->type; in fold_tcg_st_memcopy()
2807 * This happens frequently when the target ISA zero-extends. in fold_tcg_st_memcopy()
2812 tcg_op_remove(ctx->tcg, op); in fold_tcg_st_memcopy()
2817 last = ofs + tcg_type_size(type) - 1; in fold_tcg_st_memcopy()
2831 fold_xi_to_not(ctx, op, -1)) { in fold_xor()
2835 t1 = arg_info(op->args[1]); in fold_xor()
2836 t2 = arg_info(op->args[2]); in fold_xor()
2837 z_mask = t1->z_mask | t2->z_mask; in fold_xor()
2838 s_mask = t1->s_mask & t2->s_mask; in fold_xor()
2856 nb_temps = s->nb_temps; in tcg_optimize()
2858 s->temps[i].state_ptr = NULL; in tcg_optimize()
2861 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { in tcg_optimize()
2862 TCGOpcode opc = op->opc; in tcg_optimize()
2873 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); in tcg_optimize()
2874 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); in tcg_optimize()
2876 /* Pre-compute the type of the operation. */ in tcg_optimize()