1 /*
2 * Optimizations for Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2010 Samsung Electronics.
5 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26 #include "qemu/osdep.h"
27 #include "qemu/int128.h"
28 #include "qemu/interval-tree.h"
29 #include "tcg/tcg-op-common.h"
30 #include "tcg-internal.h"
31
32 #define CASE_OP_32_64(x) \
33 glue(glue(case INDEX_op_, x), _i32): \
34 glue(glue(case INDEX_op_, x), _i64)
35
36 #define CASE_OP_32_64_VEC(x) \
37 glue(glue(case INDEX_op_, x), _i32): \
38 glue(glue(case INDEX_op_, x), _i64): \
39 glue(glue(case INDEX_op_, x), _vec)
40
41 typedef struct MemCopyInfo {
42 IntervalTreeNode itree;
43 QSIMPLEQ_ENTRY(MemCopyInfo) next;
44 TCGTemp *ts;
45 TCGType type;
46 } MemCopyInfo;
47
48 typedef struct TempOptInfo {
49 bool is_const;
50 TCGTemp *prev_copy;
51 TCGTemp *next_copy;
52 QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
53 uint64_t val;
54 uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
55 uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
56 } TempOptInfo;
57
58 typedef struct OptContext {
59 TCGContext *tcg;
60 TCGOp *prev_mb;
61 TCGTempSet temps_used;
62
63 IntervalTreeRoot mem_copy;
64 QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
65
66 /* In flight values from optimization. */
67 uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
68 uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
69 uint64_t s_mask; /* mask of clrsb(value) bits */
70 TCGType type;
71 } OptContext;
72
73 /* Calculate the smask for a specific value. */
74 static uint64_t smask_from_value(uint64_t value)
75 {
76 int rep = clrsb64(value);
77 return ~(~0ull >> rep);
78 }
79
80 /*
81 * Calculate the smask for a given set of known-zeros.
82 * If there are lots of zeros on the left, we can consider the remainder
83 * an unsigned field, and thus the corresponding signed field is one bit
84 * larger.
85 */
86 static uint64_t smask_from_zmask(uint64_t zmask)
87 {
88 /*
89 * Only the 0 bits are significant for zmask, thus the msb itself
90 * must be zero, else we have no sign information.
91 */
92 int rep = clz64(zmask);
93 if (rep == 0) {
94 return 0;
95 }
96 rep -= 1;
97 return ~(~0ull >> rep);
98 }
99
100 /*
101 * Recreate a properly left-aligned smask after manipulation.
102 * Some bit-shuffling, particularly shifts and rotates, may
103 * retain sign bits on the left, but may scatter disconnected
104 * sign bits on the right. Retain only what remains to the left.
105 */
106 static uint64_t smask_from_smask(int64_t smask)
107 {
108 /* Only the 1 bits are significant for smask */
109 return smask_from_zmask(~smask);
110 }
111
112 static inline TempOptInfo *ts_info(TCGTemp *ts)
113 {
114 return ts->state_ptr;
115 }
116
117 static inline TempOptInfo *arg_info(TCGArg arg)
118 {
119 return ts_info(arg_temp(arg));
120 }
121
122 static inline bool ts_is_const(TCGTemp *ts)
123 {
124 return ts_info(ts)->is_const;
125 }
126
127 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
128 {
129 TempOptInfo *ti = ts_info(ts);
130 return ti->is_const && ti->val == val;
131 }
132
133 static inline bool arg_is_const(TCGArg arg)
134 {
135 return ts_is_const(arg_temp(arg));
136 }
137
138 static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
139 {
140 return ts_is_const_val(arg_temp(arg), val);
141 }
142
143 static inline bool ts_is_copy(TCGTemp *ts)
144 {
145 return ts_info(ts)->next_copy != ts;
146 }
147
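/*
 * Of two copies of one value, return the one whose temp kind ranks
 * higher; this picks the preferred representative of a copy list.
 */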
148 static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
149 {
150 return a->kind < b->kind ? b : a;
151 }
152
153 /* Initialize and activate a temporary. */
154 static void init_ts_info(OptContext *ctx, TCGTemp *ts)
155 {
156 size_t idx = temp_idx(ts);
157 TempOptInfo *ti;
158
159 if (test_bit(idx, ctx->temps_used.l)) {
160 return;
161 }
162 set_bit(idx, ctx->temps_used.l);
163
164 ti = ts->state_ptr;
165 if (ti == NULL) {
166 ti = tcg_malloc(sizeof(TempOptInfo));
167 ts->state_ptr = ti;
168 }
169
170 ti->next_copy = ts;
171 ti->prev_copy = ts;
172 QSIMPLEQ_INIT(&ti->mem_copy);
173 if (ts->kind == TEMP_CONST) {
174 ti->is_const = true;
175 ti->val = ts->val;
176 ti->z_mask = ts->val;
177 ti->s_mask = smask_from_value(ts->val);
178 } else {
179 ti->is_const = false;
180 ti->z_mask = -1;
181 ti->s_mask = 0;
182 }
183 }
184
185 static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
186 {
187 IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
188 return r ? container_of(r, MemCopyInfo, itree) : NULL;
189 }
190
191 static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
192 {
193 IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
194 return r ? container_of(r, MemCopyInfo, itree) : NULL;
195 }
196
197 static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
198 {
199 TCGTemp *ts = mc->ts;
200 TempOptInfo *ti = ts_info(ts);
201
202 interval_tree_remove(&mc->itree, &ctx->mem_copy);
203 QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
204 QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
205 }
206
207 static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
208 {
209 while (true) {
210 MemCopyInfo *mc = mem_copy_first(ctx, s, l);
211 if (!mc) {
212 break;
213 }
214 remove_mem_copy(ctx, mc);
215 }
216 }
217
218 static void remove_mem_copy_all(OptContext *ctx)
219 {
220 remove_mem_copy_in(ctx, 0, -1);
221 tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
222 }
223
224 static TCGTemp *find_better_copy(TCGTemp *ts)
225 {
226 TCGTemp *i, *ret;
227
228 /* If this is already readonly, we can't do better. */
229 if (temp_readonly(ts)) {
230 return ts;
231 }
232
233 ret = ts;
234 for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
235 ret = cmp_better_copy(ret, i);
236 }
237 return ret;
238 }
239
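/* Transfer all memory-copy records tracked for SRC_TS over to DST_TS. */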
240 static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
241 {
242 TempOptInfo *si = ts_info(src_ts);
243 TempOptInfo *di = ts_info(dst_ts);
244 MemCopyInfo *mc;
245
246 QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
247 tcg_debug_assert(mc->ts == src_ts);
248 mc->ts = dst_ts;
249 }
250 QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
251 }
252
253 /* Reset TEMP's state, possibly removing the temp from the list of copies. */
254 static void reset_ts(OptContext *ctx, TCGTemp *ts)
255 {
256 TempOptInfo *ti = ts_info(ts);
257 TCGTemp *pts = ti->prev_copy;
258 TCGTemp *nts = ti->next_copy;
259 TempOptInfo *pi = ts_info(pts);
260 TempOptInfo *ni = ts_info(nts);
261
262 ni->prev_copy = ti->prev_copy;
263 pi->next_copy = ti->next_copy;
264 ti->next_copy = ts;
265 ti->prev_copy = ts;
266 ti->is_const = false;
267 ti->z_mask = -1;
268 ti->s_mask = 0;
269
270 if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
271 if (ts == nts) {
272 /* Last temp copy being removed, the mem copies die. */
273 MemCopyInfo *mc;
274 QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
275 interval_tree_remove(&mc->itree, &ctx->mem_copy);
276 }
277 QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
278 } else {
279 move_mem_copies(find_better_copy(nts), ts);
280 }
281 }
282 }
283
284 static void reset_temp(OptContext *ctx, TCGArg arg)
285 {
286 reset_ts(ctx, arg_temp(arg));
287 }
288
289 static void record_mem_copy(OptContext *ctx, TCGType type,
290 TCGTemp *ts, intptr_t start, intptr_t last)
291 {
292 MemCopyInfo *mc;
293 TempOptInfo *ti;
294
295 mc = QSIMPLEQ_FIRST(&ctx->mem_free);
296 if (mc) {
297 QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
298 } else {
299 mc = tcg_malloc(sizeof(*mc));
300 }
301
302 memset(mc, 0, sizeof(*mc));
303 mc->itree.start = start;
304 mc->itree.last = last;
305 mc->type = type;
306 interval_tree_insert(&mc->itree, &ctx->mem_copy);
307
308 ts = find_better_copy(ts);
309 ti = ts_info(ts);
310 mc->ts = ts;
311 QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
312 }
313
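/* Return true if TS1 and TS2 are known to hold the same value. */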
314 static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
315 {
316 TCGTemp *i;
317
318 if (ts1 == ts2) {
319 return true;
320 }
321
322 if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
323 return false;
324 }
325
326 for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
327 if (i == ts2) {
328 return true;
329 }
330 }
331
332 return false;
333 }
334
335 static bool args_are_copies(TCGArg arg1, TCGArg arg2)
336 {
337 return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
338 }
339
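/*
 * Return the best-known copy of the value stored at offset S with the
 * given type, or NULL if no such memory copy is being tracked.
 */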
340 static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
341 {
342 MemCopyInfo *mc;
343
344 for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
345 if (mc->itree.start == s && mc->type == type) {
346 return find_better_copy(mc->ts);
347 }
348 }
349 return NULL;
350 }
351
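/* Return an argument for a constant temp holding VAL, canonicalized for the current fold type. */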
352 static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
353 {
354 TCGType type = ctx->type;
355 TCGTemp *ts;
356
357 if (type == TCG_TYPE_I32) {
358 val = (int32_t)val;
359 }
360
361 ts = tcg_constant_internal(type, val);
362 init_ts_info(ctx, ts);
363
364 return temp_arg(ts);
365 }
366
367 static TCGArg arg_new_temp(OptContext *ctx)
368 {
369 TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
370 init_ts_info(ctx, ts);
371 return temp_arg(ts);
372 }
373
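/*
 * Rewrite OP as a mov from SRC to DST (or remove it if they are already
 * copies), and record DST as a new copy of SRC. Always returns true.
 */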
374 static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
375 {
376 TCGTemp *dst_ts = arg_temp(dst);
377 TCGTemp *src_ts = arg_temp(src);
378 TempOptInfo *di;
379 TempOptInfo *si;
380 TCGOpcode new_op;
381
382 if (ts_are_copies(dst_ts, src_ts)) {
383 tcg_op_remove(ctx->tcg, op);
384 return true;
385 }
386
387 reset_ts(ctx, dst_ts);
388 di = ts_info(dst_ts);
389 si = ts_info(src_ts);
390
391 switch (ctx->type) {
392 case TCG_TYPE_I32:
393 new_op = INDEX_op_mov_i32;
394 break;
395 case TCG_TYPE_I64:
396 new_op = INDEX_op_mov_i64;
397 break;
398 case TCG_TYPE_V64:
399 case TCG_TYPE_V128:
400 case TCG_TYPE_V256:
401 /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
402 new_op = INDEX_op_mov_vec;
403 break;
404 default:
405 g_assert_not_reached();
406 }
407 op->opc = new_op;
408 op->args[0] = dst;
409 op->args[1] = src;
410
411 di->z_mask = si->z_mask;
412 di->s_mask = si->s_mask;
413
414 if (src_ts->type == dst_ts->type) {
415 TempOptInfo *ni = ts_info(si->next_copy);
416
417 di->next_copy = si->next_copy;
418 di->prev_copy = src_ts;
419 ni->prev_copy = dst_ts;
420 si->next_copy = dst_ts;
421 di->is_const = si->is_const;
422 di->val = si->val;
423
424 if (!QSIMPLEQ_EMPTY(&si->mem_copy)
425 && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
426 move_mem_copies(dst_ts, src_ts);
427 }
428 }
429 return true;
430 }
431
432 static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
433 TCGArg dst, uint64_t val)
434 {
435 /* Convert movi to mov with constant temp. */
436 return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
437 }
438
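/* Evaluate OP on constant operands X and Y; the result is not yet truncated to the operation type. */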
439 static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
440 {
441 uint64_t l64, h64;
442
443 switch (op) {
444 CASE_OP_32_64(add):
445 return x + y;
446
447 CASE_OP_32_64(sub):
448 return x - y;
449
450 CASE_OP_32_64(mul):
451 return x * y;
452
453 CASE_OP_32_64_VEC(and):
454 return x & y;
455
456 CASE_OP_32_64_VEC(or):
457 return x | y;
458
459 CASE_OP_32_64_VEC(xor):
460 return x ^ y;
461
462 case INDEX_op_shl_i32:
463 return (uint32_t)x << (y & 31);
464
465 case INDEX_op_shl_i64:
466 return (uint64_t)x << (y & 63);
467
468 case INDEX_op_shr_i32:
469 return (uint32_t)x >> (y & 31);
470
471 case INDEX_op_shr_i64:
472 return (uint64_t)x >> (y & 63);
473
474 case INDEX_op_sar_i32:
475 return (int32_t)x >> (y & 31);
476
477 case INDEX_op_sar_i64:
478 return (int64_t)x >> (y & 63);
479
480 case INDEX_op_rotr_i32:
481 return ror32(x, y & 31);
482
483 case INDEX_op_rotr_i64:
484 return ror64(x, y & 63);
485
486 case INDEX_op_rotl_i32:
487 return rol32(x, y & 31);
488
489 case INDEX_op_rotl_i64:
490 return rol64(x, y & 63);
491
492 CASE_OP_32_64_VEC(not):
493 return ~x;
494
495 CASE_OP_32_64(neg):
496 return -x;
497
498 CASE_OP_32_64_VEC(andc):
499 return x & ~y;
500
501 CASE_OP_32_64_VEC(orc):
502 return x | ~y;
503
504 CASE_OP_32_64_VEC(eqv):
505 return ~(x ^ y);
506
507 CASE_OP_32_64_VEC(nand):
508 return ~(x & y);
509
510 CASE_OP_32_64_VEC(nor):
511 return ~(x | y);
512
513 case INDEX_op_clz_i32:
514 return (uint32_t)x ? clz32(x) : y;
515
516 case INDEX_op_clz_i64:
517 return x ? clz64(x) : y;
518
519 case INDEX_op_ctz_i32:
520 return (uint32_t)x ? ctz32(x) : y;
521
522 case INDEX_op_ctz_i64:
523 return x ? ctz64(x) : y;
524
525 case INDEX_op_ctpop_i32:
526 return ctpop32(x);
527
528 case INDEX_op_ctpop_i64:
529 return ctpop64(x);
530
531 CASE_OP_32_64(ext8s):
532 return (int8_t)x;
533
534 CASE_OP_32_64(ext16s):
535 return (int16_t)x;
536
537 CASE_OP_32_64(ext8u):
538 return (uint8_t)x;
539
540 CASE_OP_32_64(ext16u):
541 return (uint16_t)x;
542
543 CASE_OP_32_64(bswap16):
544 x = bswap16(x);
545 return y & TCG_BSWAP_OS ? (int16_t)x : x;
546
547 CASE_OP_32_64(bswap32):
548 x = bswap32(x);
549 return y & TCG_BSWAP_OS ? (int32_t)x : x;
550
551 case INDEX_op_bswap64_i64:
552 return bswap64(x);
553
554 case INDEX_op_ext_i32_i64:
555 case INDEX_op_ext32s_i64:
556 return (int32_t)x;
557
558 case INDEX_op_extu_i32_i64:
559 case INDEX_op_extrl_i64_i32:
560 case INDEX_op_ext32u_i64:
561 return (uint32_t)x;
562
563 case INDEX_op_extrh_i64_i32:
564 return (uint64_t)x >> 32;
565
566 case INDEX_op_muluh_i32:
567 return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
568 case INDEX_op_mulsh_i32:
569 return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
570
571 case INDEX_op_muluh_i64:
572 mulu64(&l64, &h64, x, y);
573 return h64;
574 case INDEX_op_mulsh_i64:
575 muls64(&l64, &h64, x, y);
576 return h64;
577
578 case INDEX_op_div_i32:
579 /* Avoid crashing on divide by zero, otherwise undefined. */
580 return (int32_t)x / ((int32_t)y ? : 1);
581 case INDEX_op_divu_i32:
582 return (uint32_t)x / ((uint32_t)y ? : 1);
583 case INDEX_op_div_i64:
584 return (int64_t)x / ((int64_t)y ? : 1);
585 case INDEX_op_divu_i64:
586 return (uint64_t)x / ((uint64_t)y ? : 1);
587
588 case INDEX_op_rem_i32:
589 return (int32_t)x % ((int32_t)y ? : 1);
590 case INDEX_op_remu_i32:
591 return (uint32_t)x % ((uint32_t)y ? : 1);
592 case INDEX_op_rem_i64:
593 return (int64_t)x % ((int64_t)y ? : 1);
594 case INDEX_op_remu_i64:
595 return (uint64_t)x % ((uint64_t)y ? : 1);
596
597 default:
598 g_assert_not_reached();
599 }
600 }
601
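/* As do_constant_folding_2, but sign-extend a 32-bit result so that constants remain in canonical form. */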
602 static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
603 uint64_t x, uint64_t y)
604 {
605 uint64_t res = do_constant_folding_2(op, x, y);
606 if (type == TCG_TYPE_I32) {
607 res = (int32_t)res;
608 }
609 return res;
610 }
611
612 static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
613 {
614 switch (c) {
615 case TCG_COND_EQ:
616 return x == y;
617 case TCG_COND_NE:
618 return x != y;
619 case TCG_COND_LT:
620 return (int32_t)x < (int32_t)y;
621 case TCG_COND_GE:
622 return (int32_t)x >= (int32_t)y;
623 case TCG_COND_LE:
624 return (int32_t)x <= (int32_t)y;
625 case TCG_COND_GT:
626 return (int32_t)x > (int32_t)y;
627 case TCG_COND_LTU:
628 return x < y;
629 case TCG_COND_GEU:
630 return x >= y;
631 case TCG_COND_LEU:
632 return x <= y;
633 case TCG_COND_GTU:
634 return x > y;
635 case TCG_COND_TSTEQ:
636 return (x & y) == 0;
637 case TCG_COND_TSTNE:
638 return (x & y) != 0;
639 case TCG_COND_ALWAYS:
640 case TCG_COND_NEVER:
641 break;
642 }
643 g_assert_not_reached();
644 }
645
646 static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
647 {
648 switch (c) {
649 case TCG_COND_EQ:
650 return x == y;
651 case TCG_COND_NE:
652 return x != y;
653 case TCG_COND_LT:
654 return (int64_t)x < (int64_t)y;
655 case TCG_COND_GE:
656 return (int64_t)x >= (int64_t)y;
657 case TCG_COND_LE:
658 return (int64_t)x <= (int64_t)y;
659 case TCG_COND_GT:
660 return (int64_t)x > (int64_t)y;
661 case TCG_COND_LTU:
662 return x < y;
663 case TCG_COND_GEU:
664 return x >= y;
665 case TCG_COND_LEU:
666 return x <= y;
667 case TCG_COND_GTU:
668 return x > y;
669 case TCG_COND_TSTEQ:
670 return (x & y) == 0;
671 case TCG_COND_TSTNE:
672 return (x & y) != 0;
673 case TCG_COND_ALWAYS:
674 case TCG_COND_NEVER:
675 break;
676 }
677 g_assert_not_reached();
678 }
679
680 static int do_constant_folding_cond_eq(TCGCond c)
681 {
682 switch (c) {
683 case TCG_COND_GT:
684 case TCG_COND_LTU:
685 case TCG_COND_LT:
686 case TCG_COND_GTU:
687 case TCG_COND_NE:
688 return 0;
689 case TCG_COND_GE:
690 case TCG_COND_GEU:
691 case TCG_COND_LE:
692 case TCG_COND_LEU:
693 case TCG_COND_EQ:
694 return 1;
695 case TCG_COND_TSTEQ:
696 case TCG_COND_TSTNE:
697 return -1;
698 case TCG_COND_ALWAYS:
699 case TCG_COND_NEVER:
700 break;
701 }
702 g_assert_not_reached();
703 }
704
705 /*
706 * Return -1 if the condition can't be simplified,
707 * and the result of the condition (0 or 1) if it can.
708 */
709 static int do_constant_folding_cond(TCGType type, TCGArg x,
710 TCGArg y, TCGCond c)
711 {
712 if (arg_is_const(x) && arg_is_const(y)) {
713 uint64_t xv = arg_info(x)->val;
714 uint64_t yv = arg_info(y)->val;
715
716 switch (type) {
717 case TCG_TYPE_I32:
718 return do_constant_folding_cond_32(xv, yv, c);
719 case TCG_TYPE_I64:
720 return do_constant_folding_cond_64(xv, yv, c);
721 default:
722 /* Only scalar comparisons are optimizable */
723 return -1;
724 }
725 } else if (args_are_copies(x, y)) {
726 return do_constant_folding_cond_eq(c);
727 } else if (arg_is_const_val(y, 0)) {
728 switch (c) {
729 case TCG_COND_LTU:
730 case TCG_COND_TSTNE:
731 return 0;
732 case TCG_COND_GEU:
733 case TCG_COND_TSTEQ:
734 return 1;
735 default:
736 return -1;
737 }
738 }
739 return -1;
740 }
741
742 /**
743 * swap_commutative:
744 * @dest: TCGArg of the destination argument, or NO_DEST.
745 * @p1: first paired argument
746 * @p2: second paired argument
747 *
748 * If *@p1 is a constant and *@p2 is not, swap.
749 * If *@p2 matches @dest, swap.
750 * Return true if a swap was performed.
751 */
752
753 #define NO_DEST temp_arg(NULL)
754
755 static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
756 {
757 TCGArg a1 = *p1, a2 = *p2;
758 int sum = 0;
759 sum += arg_is_const(a1);
760 sum -= arg_is_const(a2);
761
762 /* Prefer the constant in second argument, and then the form
763 op a, a, b, which is better handled on non-RISC hosts. */
764 if (sum > 0 || (sum == 0 && dest == a2)) {
765 *p1 = a2;
766 *p2 = a1;
767 return true;
768 }
769 return false;
770 }
771
772 static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
773 {
774 int sum = 0;
775 sum += arg_is_const(p1[0]);
776 sum += arg_is_const(p1[1]);
777 sum -= arg_is_const(p2[0]);
778 sum -= arg_is_const(p2[1]);
779 if (sum > 0) {
780 TCGArg t;
781 t = p1[0], p1[0] = p2[0], p2[0] = t;
782 t = p1[1], p1[1] = p2[1], p2[1] = t;
783 return true;
784 }
785 return false;
786 }
787
788 /*
789 * Return -1 if the condition can't be simplified,
790 * and the result of the condition (0 or 1) if it can.
791 */
792 static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
793 TCGArg *p1, TCGArg *p2, TCGArg *pcond)
794 {
795 TCGCond cond;
796 bool swap;
797 int r;
798
799 swap = swap_commutative(dest, p1, p2);
800 cond = *pcond;
801 if (swap) {
802 *pcond = cond = tcg_swap_cond(cond);
803 }
804
805 r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
806 if (r >= 0) {
807 return r;
808 }
809 if (!is_tst_cond(cond)) {
810 return -1;
811 }
812
813 /*
814 * TSTNE x,x -> NE x,0
815 * TSTNE x,-1 -> NE x,0
816 */
817 if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
818 *p2 = arg_new_constant(ctx, 0);
819 *pcond = tcg_tst_eqne_cond(cond);
820 return -1;
821 }
822
823 /* TSTNE x,sign -> LT x,0 */
824 if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
825 ? INT32_MIN : INT64_MIN))) {
826 *p2 = arg_new_constant(ctx, 0);
827 *pcond = tcg_tst_ltge_cond(cond);
828 return -1;
829 }
830
831 /* Expand to AND with a temporary if no backend support. */
832 if (!TCG_TARGET_HAS_tst) {
833 TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
834 ? INDEX_op_and_i32 : INDEX_op_and_i64);
835 TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
836 TCGArg tmp = arg_new_temp(ctx);
837
838 op2->args[0] = tmp;
839 op2->args[1] = *p1;
840 op2->args[2] = *p2;
841
842 *p1 = tmp;
843 *p2 = arg_new_constant(ctx, 0);
844 *pcond = tcg_tst_eqne_cond(cond);
845 }
846 return -1;
847 }
848
849 static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
850 {
851 TCGArg al, ah, bl, bh;
852 TCGCond c;
853 bool swap;
854 int r;
855
856 swap = swap_commutative2(args, args + 2);
857 c = args[4];
858 if (swap) {
859 args[4] = c = tcg_swap_cond(c);
860 }
861
862 al = args[0];
863 ah = args[1];
864 bl = args[2];
865 bh = args[3];
866
867 if (arg_is_const(bl) && arg_is_const(bh)) {
868 tcg_target_ulong blv = arg_info(bl)->val;
869 tcg_target_ulong bhv = arg_info(bh)->val;
870 uint64_t b = deposit64(blv, 32, 32, bhv);
871
872 if (arg_is_const(al) && arg_is_const(ah)) {
873 tcg_target_ulong alv = arg_info(al)->val;
874 tcg_target_ulong ahv = arg_info(ah)->val;
875 uint64_t a = deposit64(alv, 32, 32, ahv);
876
877 r = do_constant_folding_cond_64(a, b, c);
878 if (r >= 0) {
879 return r;
880 }
881 }
882
883 if (b == 0) {
884 switch (c) {
885 case TCG_COND_LTU:
886 case TCG_COND_TSTNE:
887 return 0;
888 case TCG_COND_GEU:
889 case TCG_COND_TSTEQ:
890 return 1;
891 default:
892 break;
893 }
894 }
895
896 /* TSTNE x,-1 -> NE x,0 */
897 if (b == -1 && is_tst_cond(c)) {
898 args[3] = args[2] = arg_new_constant(ctx, 0);
899 args[4] = tcg_tst_eqne_cond(c);
900 return -1;
901 }
902
903 /* TSTNE x,sign -> LT x,0 */
904 if (b == INT64_MIN && is_tst_cond(c)) {
905 /* bl must be 0, so copy that to bh */
906 args[3] = bl;
907 args[4] = tcg_tst_ltge_cond(c);
908 return -1;
909 }
910 }
911
912 if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
913 r = do_constant_folding_cond_eq(c);
914 if (r >= 0) {
915 return r;
916 }
917
918 /* TSTNE x,x -> NE x,0 */
919 if (is_tst_cond(c)) {
920 args[3] = args[2] = arg_new_constant(ctx, 0);
921 args[4] = tcg_tst_eqne_cond(c);
922 return -1;
923 }
924 }
925
926 /* Expand to AND with a temporary if no backend support. */
927 if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
928 TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
929 TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
930 TCGArg t1 = arg_new_temp(ctx);
931 TCGArg t2 = arg_new_temp(ctx);
932
933 op1->args[0] = t1;
934 op1->args[1] = al;
935 op1->args[2] = bl;
936 op2->args[0] = t2;
937 op2->args[1] = ah;
938 op2->args[2] = bh;
939
940 args[0] = t1;
941 args[1] = t2;
942 args[3] = args[2] = arg_new_constant(ctx, 0);
943 args[4] = tcg_tst_eqne_cond(c);
944 }
945 return -1;
946 }
947
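/* Make sure every argument of OP has an initialized TempOptInfo. */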
948 static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
949 {
950 for (int i = 0; i < nb_args; i++) {
951 TCGTemp *ts = arg_temp(op->args[i]);
952 init_ts_info(ctx, ts);
953 }
954 }
955
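/* Replace each input argument of OP with the best available copy of its value. */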
956 static void copy_propagate(OptContext *ctx, TCGOp *op,
957 int nb_oargs, int nb_iargs)
958 {
959 for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
960 TCGTemp *ts = arg_temp(op->args[i]);
961 if (ts_is_copy(ts)) {
962 op->args[i] = temp_arg(find_better_copy(ts));
963 }
964 }
965 }
966
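/*
 * Finish processing one op: reset state at the end of a basic block,
 * otherwise reset the outputs and record the masks computed for the
 * first output.
 */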
967 static void finish_folding(OptContext *ctx, TCGOp *op)
968 {
969 const TCGOpDef *def = &tcg_op_defs[op->opc];
970 int i, nb_oargs;
971
972 /*
973 * We only optimize extended basic blocks. If the opcode ends a BB
974 * and is not a conditional branch, reset all temp data.
975 */
976 if (def->flags & TCG_OPF_BB_END) {
977 ctx->prev_mb = NULL;
978 if (!(def->flags & TCG_OPF_COND_BRANCH)) {
979 memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
980 remove_mem_copy_all(ctx);
981 }
982 return;
983 }
984
985 nb_oargs = def->nb_oargs;
986 for (i = 0; i < nb_oargs; i++) {
987 TCGTemp *ts = arg_temp(op->args[i]);
988 reset_ts(ctx, ts);
989 /*
990 * Save the corresponding known-zero/sign bits mask for the
991 * first output argument (only one supported so far).
992 */
993 if (i == 0) {
994 ts_info(ts)->z_mask = ctx->z_mask;
995 ts_info(ts)->s_mask = ctx->s_mask;
996 }
997 }
998 }
999
1000 /*
1001 * The fold_* functions return true when processing is complete,
1002 * usually by folding the operation to a constant or to a copy,
1003 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
1004 * like collect information about the value produced, for use in
1005 * optimizing a subsequent operation.
1006 *
1007 * These first fold_* functions are all helpers, used by other
1008 * folders for more specific operations.
1009 */
1010
1011 static bool fold_const1(OptContext *ctx, TCGOp *op)
1012 {
1013 if (arg_is_const(op->args[1])) {
1014 uint64_t t;
1015
1016 t = arg_info(op->args[1])->val;
1017 t = do_constant_folding(op->opc, ctx->type, t, 0);
1018 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1019 }
1020 return false;
1021 }
1022
1023 static bool fold_const2(OptContext *ctx, TCGOp *op)
1024 {
1025 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1026 uint64_t t1 = arg_info(op->args[1])->val;
1027 uint64_t t2 = arg_info(op->args[2])->val;
1028
1029 t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
1030 return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
1031 }
1032 return false;
1033 }
1034
1035 static bool fold_commutative(OptContext *ctx, TCGOp *op)
1036 {
1037 swap_commutative(op->args[0], &op->args[1], &op->args[2]);
1038 return false;
1039 }
1040
1041 static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
1042 {
1043 swap_commutative(op->args[0], &op->args[1], &op->args[2]);
1044 return fold_const2(ctx, op);
1045 }
1046
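/*
 * Apply the masks accumulated for the current op: fold to constant 0
 * if no bits can be set (z_mask == 0), or to a copy of the first input
 * if no bits can differ from it (a_mask == 0).
 */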
1047 static bool fold_masks(OptContext *ctx, TCGOp *op)
1048 {
1049 uint64_t a_mask = ctx->a_mask;
1050 uint64_t z_mask = ctx->z_mask;
1051 uint64_t s_mask = ctx->s_mask;
1052
1053 /*
1054 * 32-bit ops generate 32-bit results, which for the purpose of
1055 * simplifying tcg are sign-extended. Certainly that's how we
1056 * represent our constants elsewhere. Note that the bits will
1057 * be reset properly for a 64-bit value when encountering the
1058 * type changing opcodes.
1059 */
1060 if (ctx->type == TCG_TYPE_I32) {
1061 a_mask = (int32_t)a_mask;
1062 z_mask = (int32_t)z_mask;
1063 s_mask |= MAKE_64BIT_MASK(32, 32);
1064 ctx->z_mask = z_mask;
1065 ctx->s_mask = s_mask;
1066 }
1067
1068 if (z_mask == 0) {
1069 return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
1070 }
1071 if (a_mask == 0) {
1072 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1073 }
1074 return false;
1075 }
1076
1077 /*
1078 * Convert @op to NOT, if NOT is supported by the host.
1079 * Return true if the conversion is successful, which will still
1080 * indicate that the processing is complete.
1081 */
1082 static bool fold_not(OptContext *ctx, TCGOp *op);
1083 static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
1084 {
1085 TCGOpcode not_op;
1086 bool have_not;
1087
1088 switch (ctx->type) {
1089 case TCG_TYPE_I32:
1090 not_op = INDEX_op_not_i32;
1091 have_not = TCG_TARGET_HAS_not_i32;
1092 break;
1093 case TCG_TYPE_I64:
1094 not_op = INDEX_op_not_i64;
1095 have_not = TCG_TARGET_HAS_not_i64;
1096 break;
1097 case TCG_TYPE_V64:
1098 case TCG_TYPE_V128:
1099 case TCG_TYPE_V256:
1100 not_op = INDEX_op_not_vec;
1101 have_not = TCG_TARGET_HAS_not_vec;
1102 break;
1103 default:
1104 g_assert_not_reached();
1105 }
1106 if (have_not) {
1107 op->opc = not_op;
1108 op->args[1] = op->args[idx];
1109 return fold_not(ctx, op);
1110 }
1111 return false;
1112 }
1113
1114 /* If the binary operation has first argument @i, fold to @i. */
1115 static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
1116 {
1117 if (arg_is_const_val(op->args[1], i)) {
1118 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
1119 }
1120 return false;
1121 }
1122
1123 /* If the binary operation has first argument @i, fold to NOT. */
1124 static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
1125 {
1126 if (arg_is_const_val(op->args[1], i)) {
1127 return fold_to_not(ctx, op, 2);
1128 }
1129 return false;
1130 }
1131
1132 /* If the binary operation has second argument @i, fold to @i. */
1133 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
1134 {
1135 if (arg_is_const_val(op->args[2], i)) {
1136 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
1137 }
1138 return false;
1139 }
1140
1141 /* If the binary operation has second argument @i, fold to identity. */
1142 static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
1143 {
1144 if (arg_is_const_val(op->args[2], i)) {
1145 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1146 }
1147 return false;
1148 }
1149
1150 /* If the binary operation has second argument @i, fold to NOT. */
1151 static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
1152 {
1153 if (arg_is_const_val(op->args[2], i)) {
1154 return fold_to_not(ctx, op, 1);
1155 }
1156 return false;
1157 }
1158
1159 /* If the binary operation has both arguments equal, fold to @i. */
1160 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
1161 {
1162 if (args_are_copies(op->args[1], op->args[2])) {
1163 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
1164 }
1165 return false;
1166 }
1167
1168 /* If the binary operation has both arguments equal, fold to identity. */
1169 static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
1170 {
1171 if (args_are_copies(op->args[1], op->args[2])) {
1172 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1173 }
1174 return false;
1175 }
1176
1177 /*
1178 * These outermost fold_<op> functions are sorted alphabetically.
1179 *
1180 * The ordering of the transformations should be:
1181 * 1) those that produce a constant
1182 * 2) those that produce a copy
1183 * 3) those that produce information about the result value.
1184 */
1185
1186 static bool fold_add(OptContext *ctx, TCGOp *op)
1187 {
1188 if (fold_const2_commutative(ctx, op) ||
1189 fold_xi_to_x(ctx, op, 0)) {
1190 return true;
1191 }
1192 return false;
1193 }
1194
1195 /* We cannot as yet do_constant_folding with vectors. */
1196 static bool fold_add_vec(OptContext *ctx, TCGOp *op)
1197 {
1198 if (fold_commutative(ctx, op) ||
1199 fold_xi_to_x(ctx, op, 0)) {
1200 return true;
1201 }
1202 return false;
1203 }
1204
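/*
 * Handle add2/sub2: fold completely when both double-word operands are
 * constant; otherwise rewrite sub2 with a constant subtrahend as add2
 * of its negation.
 */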
1205 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
1206 {
1207 bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
1208 bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);
1209
1210 if (a_const && b_const) {
1211 uint64_t al = arg_info(op->args[2])->val;
1212 uint64_t ah = arg_info(op->args[3])->val;
1213 uint64_t bl = arg_info(op->args[4])->val;
1214 uint64_t bh = arg_info(op->args[5])->val;
1215 TCGArg rl, rh;
1216 TCGOp *op2;
1217
1218 if (ctx->type == TCG_TYPE_I32) {
1219 uint64_t a = deposit64(al, 32, 32, ah);
1220 uint64_t b = deposit64(bl, 32, 32, bh);
1221
1222 if (add) {
1223 a += b;
1224 } else {
1225 a -= b;
1226 }
1227
1228 al = sextract64(a, 0, 32);
1229 ah = sextract64(a, 32, 32);
1230 } else {
1231 Int128 a = int128_make128(al, ah);
1232 Int128 b = int128_make128(bl, bh);
1233
1234 if (add) {
1235 a = int128_add(a, b);
1236 } else {
1237 a = int128_sub(a, b);
1238 }
1239
1240 al = int128_getlo(a);
1241 ah = int128_gethi(a);
1242 }
1243
1244 rl = op->args[0];
1245 rh = op->args[1];
1246
1247 /* The proper opcode is supplied by tcg_opt_gen_mov. */
1248 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
1249
1250 tcg_opt_gen_movi(ctx, op, rl, al);
1251 tcg_opt_gen_movi(ctx, op2, rh, ah);
1252 return true;
1253 }
1254
1255 /* Fold sub2 r,x,i to add2 r,x,-i */
1256 if (!add && b_const) {
1257 uint64_t bl = arg_info(op->args[4])->val;
1258 uint64_t bh = arg_info(op->args[5])->val;
1259
1260 /* Negate the two parts without assembling and disassembling. */
1261 bl = -bl;
1262 bh = ~bh + !bl;
1263
1264 op->opc = (ctx->type == TCG_TYPE_I32
1265 ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
1266 op->args[4] = arg_new_constant(ctx, bl);
1267 op->args[5] = arg_new_constant(ctx, bh);
1268 }
1269 return false;
1270 }
1271
1272 static bool fold_add2(OptContext *ctx, TCGOp *op)
1273 {
1274 /* Note that the high and low parts may be independently swapped. */
1275 swap_commutative(op->args[0], &op->args[2], &op->args[4]);
1276 swap_commutative(op->args[1], &op->args[3], &op->args[5]);
1277
1278 return fold_addsub2(ctx, op, true);
1279 }
1280
1281 static bool fold_and(OptContext *ctx, TCGOp *op)
1282 {
1283 uint64_t z1, z2;
1284
1285 if (fold_const2_commutative(ctx, op) ||
1286 fold_xi_to_i(ctx, op, 0) ||
1287 fold_xi_to_x(ctx, op, -1) ||
1288 fold_xx_to_x(ctx, op)) {
1289 return true;
1290 }
1291
1292 z1 = arg_info(op->args[1])->z_mask;
1293 z2 = arg_info(op->args[2])->z_mask;
1294 ctx->z_mask = z1 & z2;
1295
1296 /*
1297 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1298 * Bitwise operations preserve the relative quantity of the repetitions.
1299 */
1300 ctx->s_mask = arg_info(op->args[1])->s_mask
1301 & arg_info(op->args[2])->s_mask;
1302
1303 /*
1304 * Known-zeros does not imply known-ones. Therefore unless
1305 * arg2 is constant, we can't infer affected bits from it.
1306 */
1307 if (arg_is_const(op->args[2])) {
1308 ctx->a_mask = z1 & ~z2;
1309 }
1310
1311 return fold_masks(ctx, op);
1312 }
1313
1314 static bool fold_andc(OptContext *ctx, TCGOp *op)
1315 {
1316 uint64_t z1;
1317
1318 if (fold_const2(ctx, op) ||
1319 fold_xx_to_i(ctx, op, 0) ||
1320 fold_xi_to_x(ctx, op, 0) ||
1321 fold_ix_to_not(ctx, op, -1)) {
1322 return true;
1323 }
1324
1325 z1 = arg_info(op->args[1])->z_mask;
1326
1327 /*
1328 * Known-zeros does not imply known-ones. Therefore unless
1329 * arg2 is constant, we can't infer anything from it.
1330 */
1331 if (arg_is_const(op->args[2])) {
1332 uint64_t z2 = ~arg_info(op->args[2])->z_mask;
1333 ctx->a_mask = z1 & ~z2;
1334 z1 &= z2;
1335 }
1336 ctx->z_mask = z1;
1337
1338 ctx->s_mask = arg_info(op->args[1])->s_mask
1339 & arg_info(op->args[2])->s_mask;
1340 return fold_masks(ctx, op);
1341 }
1342
1343 static bool fold_brcond(OptContext *ctx, TCGOp *op)
1344 {
1345 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
1346 &op->args[1], &op->args[2]);
1347 if (i == 0) {
1348 tcg_op_remove(ctx->tcg, op);
1349 return true;
1350 }
1351 if (i > 0) {
1352 op->opc = INDEX_op_br;
1353 op->args[0] = op->args[3];
1354 }
1355 return false;
1356 }
1357
1358 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1359 {
1360 TCGCond cond;
1361 TCGArg label;
1362 int i, inv = 0;
1363
1364 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
1365 cond = op->args[4];
1366 label = op->args[5];
1367 if (i >= 0) {
1368 goto do_brcond_const;
1369 }
1370
1371 switch (cond) {
1372 case TCG_COND_LT:
1373 case TCG_COND_GE:
1374 /*
1375 * Simplify LT/GE comparisons vs zero to a single compare
1376 * vs the high word of the input.
1377 */
1378 if (arg_is_const_val(op->args[2], 0) &&
1379 arg_is_const_val(op->args[3], 0)) {
1380 goto do_brcond_high;
1381 }
1382 break;
1383
1384 case TCG_COND_NE:
1385 inv = 1;
1386 QEMU_FALLTHROUGH;
1387 case TCG_COND_EQ:
1388 /*
1389 * Simplify EQ/NE comparisons where one of the pairs
1390 * can be simplified.
1391 */
1392 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
1393 op->args[2], cond);
1394 switch (i ^ inv) {
1395 case 0:
1396 goto do_brcond_const;
1397 case 1:
1398 goto do_brcond_high;
1399 }
1400
1401 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
1402 op->args[3], cond);
1403 switch (i ^ inv) {
1404 case 0:
1405 goto do_brcond_const;
1406 case 1:
1407 goto do_brcond_low;
1408 }
1409 break;
1410
1411 case TCG_COND_TSTEQ:
1412 case TCG_COND_TSTNE:
1413 if (arg_is_const_val(op->args[2], 0)) {
1414 goto do_brcond_high;
1415 }
1416 if (arg_is_const_val(op->args[3], 0)) {
1417 goto do_brcond_low;
1418 }
1419 break;
1420
1421 default:
1422 break;
1423
1424 do_brcond_low:
1425 op->opc = INDEX_op_brcond_i32;
1426 op->args[1] = op->args[2];
1427 op->args[2] = cond;
1428 op->args[3] = label;
1429 return fold_brcond(ctx, op);
1430
1431 do_brcond_high:
1432 op->opc = INDEX_op_brcond_i32;
1433 op->args[0] = op->args[1];
1434 op->args[1] = op->args[3];
1435 op->args[2] = cond;
1436 op->args[3] = label;
1437 return fold_brcond(ctx, op);
1438
1439 do_brcond_const:
1440 if (i == 0) {
1441 tcg_op_remove(ctx->tcg, op);
1442 return true;
1443 }
1444 op->opc = INDEX_op_br;
1445 op->args[0] = label;
1446 break;
1447 }
1448 return false;
1449 }
1450
1451 static bool fold_bswap(OptContext *ctx, TCGOp *op)
1452 {
1453 uint64_t z_mask, s_mask, sign;
1454
1455 if (arg_is_const(op->args[1])) {
1456 uint64_t t = arg_info(op->args[1])->val;
1457
1458 t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
1459 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1460 }
1461
1462 z_mask = arg_info(op->args[1])->z_mask;
1463
1464 switch (op->opc) {
1465 case INDEX_op_bswap16_i32:
1466 case INDEX_op_bswap16_i64:
1467 z_mask = bswap16(z_mask);
1468 sign = INT16_MIN;
1469 break;
1470 case INDEX_op_bswap32_i32:
1471 case INDEX_op_bswap32_i64:
1472 z_mask = bswap32(z_mask);
1473 sign = INT32_MIN;
1474 break;
1475 case INDEX_op_bswap64_i64:
1476 z_mask = bswap64(z_mask);
1477 sign = INT64_MIN;
1478 break;
1479 default:
1480 g_assert_not_reached();
1481 }
1482 s_mask = smask_from_zmask(z_mask);
1483
1484 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1485 case TCG_BSWAP_OZ:
1486 break;
1487 case TCG_BSWAP_OS:
1488 /* If the sign bit may be 1, force all the bits above to 1. */
1489 if (z_mask & sign) {
1490 z_mask |= sign;
1491 s_mask = sign << 1;
1492 }
1493 break;
1494 default:
1495 /* The high bits are undefined: force all bits above the sign to 1. */
1496 z_mask |= sign << 1;
1497 s_mask = 0;
1498 break;
1499 }
1500 ctx->z_mask = z_mask;
1501 ctx->s_mask = s_mask;
1502
1503 return fold_masks(ctx, op);
1504 }
1505
1506 static bool fold_call(OptContext *ctx, TCGOp *op)
1507 {
1508 TCGContext *s = ctx->tcg;
1509 int nb_oargs = TCGOP_CALLO(op);
1510 int nb_iargs = TCGOP_CALLI(op);
1511 int flags, i;
1512
1513 init_arguments(ctx, op, nb_oargs + nb_iargs);
1514 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1515
1516 /* If the function reads or writes globals, reset temp data. */
1517 flags = tcg_call_flags(op);
1518 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1519 int nb_globals = s->nb_globals;
1520
1521 for (i = 0; i < nb_globals; i++) {
1522 if (test_bit(i, ctx->temps_used.l)) {
1523 reset_ts(ctx, &ctx->tcg->temps[i]);
1524 }
1525 }
1526 }
1527
1528 /* If the function has side effects, reset mem data. */
1529 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1530 remove_mem_copy_all(ctx);
1531 }
1532
1533 /* Reset temp data for outputs. */
1534 for (i = 0; i < nb_oargs; i++) {
1535 reset_temp(ctx, op->args[i]);
1536 }
1537
1538 /* Stop optimizing MB across calls. */
1539 ctx->prev_mb = NULL;
1540 return true;
1541 }
1542
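/*
 * Fold clz/ctz: a non-zero constant input folds to a constant, a zero
 * constant input folds to the fallback value in args[2].
 */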
1543 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1544 {
1545 uint64_t z_mask;
1546
1547 if (arg_is_const(op->args[1])) {
1548 uint64_t t = arg_info(op->args[1])->val;
1549
1550 if (t != 0) {
1551 t = do_constant_folding(op->opc, ctx->type, t, 0);
1552 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1553 }
1554 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1555 }
1556
1557 switch (ctx->type) {
1558 case TCG_TYPE_I32:
1559 z_mask = 31;
1560 break;
1561 case TCG_TYPE_I64:
1562 z_mask = 63;
1563 break;
1564 default:
1565 g_assert_not_reached();
1566 }
1567 ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
1568 ctx->s_mask = smask_from_zmask(ctx->z_mask);
1569 return false;
1570 }
1571
1572 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1573 {
1574 if (fold_const1(ctx, op)) {
1575 return true;
1576 }
1577
1578 switch (ctx->type) {
1579 case TCG_TYPE_I32:
1580 ctx->z_mask = 32 | 31;
1581 break;
1582 case TCG_TYPE_I64:
1583 ctx->z_mask = 64 | 63;
1584 break;
1585 default:
1586 g_assert_not_reached();
1587 }
1588 ctx->s_mask = smask_from_zmask(ctx->z_mask);
1589 return false;
1590 }
1591
1592 static bool fold_deposit(OptContext *ctx, TCGOp *op)
1593 {
1594 TCGOpcode and_opc;
1595
1596 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1597 uint64_t t1 = arg_info(op->args[1])->val;
1598 uint64_t t2 = arg_info(op->args[2])->val;
1599
1600 t1 = deposit64(t1, op->args[3], op->args[4], t2);
1601 return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
1602 }
1603
1604 switch (ctx->type) {
1605 case TCG_TYPE_I32:
1606 and_opc = INDEX_op_and_i32;
1607 break;
1608 case TCG_TYPE_I64:
1609 and_opc = INDEX_op_and_i64;
1610 break;
1611 default:
1612 g_assert_not_reached();
1613 }
1614
1615 /* Inserting a value into zero at offset 0. */
1616 if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
1617 uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
1618
1619 op->opc = and_opc;
1620 op->args[1] = op->args[2];
1621 op->args[2] = arg_new_constant(ctx, mask);
1622 ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
1623 return false;
1624 }
1625
1626 /* Inserting zero into a value. */
1627 if (arg_is_const_val(op->args[2], 0)) {
1628 uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
1629
1630 op->opc = and_opc;
1631 op->args[2] = arg_new_constant(ctx, mask);
1632 ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
1633 return false;
1634 }
1635
1636 ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
1637 op->args[3], op->args[4],
1638 arg_info(op->args[2])->z_mask);
1639 return false;
1640 }
1641
1642 static bool fold_divide(OptContext *ctx, TCGOp *op)
1643 {
1644 if (fold_const2(ctx, op) ||
1645 fold_xi_to_x(ctx, op, 1)) {
1646 return true;
1647 }
1648 return false;
1649 }
1650
1651 static bool fold_dup(OptContext *ctx, TCGOp *op)
1652 {
1653 if (arg_is_const(op->args[1])) {
1654 uint64_t t = arg_info(op->args[1])->val;
1655 t = dup_const(TCGOP_VECE(op), t);
1656 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1657 }
1658 return false;
1659 }
1660
1661 static bool fold_dup2(OptContext *ctx, TCGOp *op)
1662 {
1663 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1664 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1665 arg_info(op->args[2])->val);
1666 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1667 }
1668
1669 if (args_are_copies(op->args[1], op->args[2])) {
1670 op->opc = INDEX_op_dup_vec;
1671 TCGOP_VECE(op) = MO_32;
1672 }
1673 return false;
1674 }
1675
1676 static bool fold_eqv(OptContext *ctx, TCGOp *op)
1677 {
1678 if (fold_const2_commutative(ctx, op) ||
1679 fold_xi_to_x(ctx, op, -1) ||
1680 fold_xi_to_not(ctx, op, 0)) {
1681 return true;
1682 }
1683
1684 ctx->s_mask = arg_info(op->args[1])->s_mask
1685 & arg_info(op->args[2])->s_mask;
1686 return false;
1687 }
1688
1689 static bool fold_extract(OptContext *ctx, TCGOp *op)
1690 {
1691 uint64_t z_mask_old, z_mask;
1692 int pos = op->args[2];
1693 int len = op->args[3];
1694
1695 if (arg_is_const(op->args[1])) {
1696 uint64_t t;
1697
1698 t = arg_info(op->args[1])->val;
1699 t = extract64(t, pos, len);
1700 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1701 }
1702
1703 z_mask_old = arg_info(op->args[1])->z_mask;
1704 z_mask = extract64(z_mask_old, pos, len);
1705 if (pos == 0) {
1706 ctx->a_mask = z_mask_old ^ z_mask;
1707 }
1708 ctx->z_mask = z_mask;
1709 ctx->s_mask = smask_from_zmask(z_mask);
1710
1711 return fold_masks(ctx, op);
1712 }
1713
1714 static bool fold_extract2(OptContext *ctx, TCGOp *op)
1715 {
1716 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1717 uint64_t v1 = arg_info(op->args[1])->val;
1718 uint64_t v2 = arg_info(op->args[2])->val;
1719 int shr = op->args[3];
1720
1721 if (op->opc == INDEX_op_extract2_i64) {
1722 v1 >>= shr;
1723 v2 <<= 64 - shr;
1724 } else {
1725 v1 = (uint32_t)v1 >> shr;
1726 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
1727 }
1728 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1729 }
1730 return false;
1731 }
1732
1733 static bool fold_exts(OptContext *ctx, TCGOp *op)
1734 {
1735 uint64_t s_mask_old, s_mask, z_mask, sign;
1736 bool type_change = false;
1737
1738 if (fold_const1(ctx, op)) {
1739 return true;
1740 }
1741
1742 z_mask = arg_info(op->args[1])->z_mask;
1743 s_mask = arg_info(op->args[1])->s_mask;
1744 s_mask_old = s_mask;
1745
1746 switch (op->opc) {
1747 CASE_OP_32_64(ext8s):
1748 sign = INT8_MIN;
1749 z_mask = (uint8_t)z_mask;
1750 break;
1751 CASE_OP_32_64(ext16s):
1752 sign = INT16_MIN;
1753 z_mask = (uint16_t)z_mask;
1754 break;
1755 case INDEX_op_ext_i32_i64:
1756 type_change = true;
1757 QEMU_FALLTHROUGH;
1758 case INDEX_op_ext32s_i64:
1759 sign = INT32_MIN;
1760 z_mask = (uint32_t)z_mask;
1761 break;
1762 default:
1763 g_assert_not_reached();
1764 }
1765
1766 if (z_mask & sign) {
1767 z_mask |= sign;
1768 }
1769 s_mask |= sign << 1;
1770
1771 ctx->z_mask = z_mask;
1772 ctx->s_mask = s_mask;
1773 if (!type_change) {
1774 ctx->a_mask = s_mask & ~s_mask_old;
1775 }
1776
1777 return fold_masks(ctx, op);
1778 }
1779
1780 static bool fold_extu(OptContext *ctx, TCGOp *op)
1781 {
1782 uint64_t z_mask_old, z_mask;
1783 bool type_change = false;
1784
1785 if (fold_const1(ctx, op)) {
1786 return true;
1787 }
1788
1789 z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
1790
1791 switch (op->opc) {
1792 CASE_OP_32_64(ext8u):
1793 z_mask = (uint8_t)z_mask;
1794 break;
1795 CASE_OP_32_64(ext16u):
1796 z_mask = (uint16_t)z_mask;
1797 break;
1798 case INDEX_op_extrl_i64_i32:
1799 case INDEX_op_extu_i32_i64:
1800 type_change = true;
1801 QEMU_FALLTHROUGH;
1802 case INDEX_op_ext32u_i64:
1803 z_mask = (uint32_t)z_mask;
1804 break;
1805 case INDEX_op_extrh_i64_i32:
1806 type_change = true;
1807 z_mask >>= 32;
1808 break;
1809 default:
1810 g_assert_not_reached();
1811 }
1812
1813 ctx->z_mask = z_mask;
1814 ctx->s_mask = smask_from_zmask(z_mask);
1815 if (!type_change) {
1816 ctx->a_mask = z_mask_old ^ z_mask;
1817 }
1818 return fold_masks(ctx, op);
1819 }
1820
1821 static bool fold_mb(OptContext *ctx, TCGOp *op)
1822 {
1823 /* Eliminate duplicate and redundant fence instructions. */
1824 if (ctx->prev_mb) {
1825 /*
1826 * Merge two barriers of the same type into one,
1827 * or a weaker barrier into a stronger one,
1828 * or two weaker barriers into a stronger one.
1829 * mb X; mb Y => mb X|Y
1830 * mb; strl => mb; st
1831 * ldaq; mb => ld; mb
1832 * ldaq; strl => ld; mb; st
1833 * Other combinations are also merged into a strong
1834 * barrier. This is stricter than specified but for
1835 * the purposes of TCG is better than not optimizing.
1836 */
1837 ctx->prev_mb->args[0] |= op->args[0];
1838 tcg_op_remove(ctx->tcg, op);
1839 } else {
1840 ctx->prev_mb = op;
1841 }
1842 return true;
1843 }
1844
1845 static bool fold_mov(OptContext *ctx, TCGOp *op)
1846 {
1847 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1848 }
1849
1850 static bool fold_movcond(OptContext *ctx, TCGOp *op)
1851 {
1852 int i;
1853
1854 /* If true and false values are the same, eliminate the cmp. */
1855 if (args_are_copies(op->args[3], op->args[4])) {
1856 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1857 }
1858
1859 /*
1860 * Canonicalize the "false" input reg to match the destination reg so
1861 * that the tcg backend can implement a "move if true" operation.
1862 */
1863 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1864 op->args[5] = tcg_invert_cond(op->args[5]);
1865 }
1866
1867 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
1868 &op->args[2], &op->args[5]);
1869 if (i >= 0) {
1870 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1871 }
1872
1873 ctx->z_mask = arg_info(op->args[3])->z_mask
1874 | arg_info(op->args[4])->z_mask;
1875 ctx->s_mask = arg_info(op->args[3])->s_mask
1876 & arg_info(op->args[4])->s_mask;
1877
1878 if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
1879 uint64_t tv = arg_info(op->args[3])->val;
1880 uint64_t fv = arg_info(op->args[4])->val;
1881 TCGOpcode opc, negopc = 0;
1882 TCGCond cond = op->args[5];
1883
1884 switch (ctx->type) {
1885 case TCG_TYPE_I32:
1886 opc = INDEX_op_setcond_i32;
1887 if (TCG_TARGET_HAS_negsetcond_i32) {
1888 negopc = INDEX_op_negsetcond_i32;
1889 }
1890 tv = (int32_t)tv;
1891 fv = (int32_t)fv;
1892 break;
1893 case TCG_TYPE_I64:
1894 opc = INDEX_op_setcond_i64;
1895 if (TCG_TARGET_HAS_negsetcond_i64) {
1896 negopc = INDEX_op_negsetcond_i64;
1897 }
1898 break;
1899 default:
1900 g_assert_not_reached();
1901 }
1902
1903 if (tv == 1 && fv == 0) {
1904 op->opc = opc;
1905 op->args[3] = cond;
1906 } else if (fv == 1 && tv == 0) {
1907 op->opc = opc;
1908 op->args[3] = tcg_invert_cond(cond);
1909 } else if (negopc) {
1910 if (tv == -1 && fv == 0) {
1911 op->opc = negopc;
1912 op->args[3] = cond;
1913 } else if (fv == -1 && tv == 0) {
1914 op->opc = negopc;
1915 op->args[3] = tcg_invert_cond(cond);
1916 }
1917 }
1918 }
1919 return false;
1920 }
1921
1922 static bool fold_mul(OptContext *ctx, TCGOp *op)
1923 {
1924 if (fold_const2(ctx, op) ||
1925 fold_xi_to_i(ctx, op, 0) ||
1926 fold_xi_to_x(ctx, op, 1)) {
1927 return true;
1928 }
1929 return false;
1930 }
1931
1932 static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
1933 {
1934 if (fold_const2_commutative(ctx, op) ||
1935 fold_xi_to_i(ctx, op, 0)) {
1936 return true;
1937 }
1938 return false;
1939 }
1940
1941 static bool fold_multiply2(OptContext *ctx, TCGOp *op)
1942 {
1943 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
1944
1945 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
1946 uint64_t a = arg_info(op->args[2])->val;
1947 uint64_t b = arg_info(op->args[3])->val;
1948 uint64_t h, l;
1949 TCGArg rl, rh;
1950 TCGOp *op2;
1951
1952 switch (op->opc) {
1953 case INDEX_op_mulu2_i32:
1954 l = (uint64_t)(uint32_t)a * (uint32_t)b;
1955 h = (int32_t)(l >> 32);
1956 l = (int32_t)l;
1957 break;
1958 case INDEX_op_muls2_i32:
1959 l = (int64_t)(int32_t)a * (int32_t)b;
1960 h = l >> 32;
1961 l = (int32_t)l;
1962 break;
1963 case INDEX_op_mulu2_i64:
1964 mulu64(&l, &h, a, b);
1965 break;
1966 case INDEX_op_muls2_i64:
1967 muls64(&l, &h, a, b);
1968 break;
1969 default:
1970 g_assert_not_reached();
1971 }
1972
1973 rl = op->args[0];
1974 rh = op->args[1];
1975
1976 /* The proper opcode is supplied by tcg_opt_gen_mov. */
1977 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
1978
1979 tcg_opt_gen_movi(ctx, op, rl, l);
1980 tcg_opt_gen_movi(ctx, op2, rh, h);
1981 return true;
1982 }
1983 return false;
1984 }
1985
1986 static bool fold_nand(OptContext *ctx, TCGOp *op)
1987 {
1988 if (fold_const2_commutative(ctx, op) ||
1989 fold_xi_to_not(ctx, op, -1)) {
1990 return true;
1991 }
1992
1993 ctx->s_mask = arg_info(op->args[1])->s_mask
1994 & arg_info(op->args[2])->s_mask;
1995 return false;
1996 }
1997
1998 static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
1999 {
2000 /* Set to 1 all bits to the left of the rightmost. */
2001 uint64_t z_mask = arg_info(op->args[1])->z_mask;
2002 ctx->z_mask = -(z_mask & -z_mask);
2003
2004 /*
2005 * Because of fold_sub_to_neg, we want to always return true,
2006 * via finish_folding.
2007 */
2008 finish_folding(ctx, op);
2009 return true;
2010 }
2011
2012 static bool fold_neg(OptContext *ctx, TCGOp *op)
2013 {
2014 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2015 }
2016
2017 static bool fold_nor(OptContext *ctx, TCGOp *op)
2018 {
2019 if (fold_const2_commutative(ctx, op) ||
2020 fold_xi_to_not(ctx, op, 0)) {
2021 return true;
2022 }
2023
2024 ctx->s_mask = arg_info(op->args[1])->s_mask
2025 & arg_info(op->args[2])->s_mask;
2026 return false;
2027 }
2028
2029 static bool fold_not(OptContext *ctx, TCGOp *op)
2030 {
2031 if (fold_const1(ctx, op)) {
2032 return true;
2033 }
2034
2035 ctx->s_mask = arg_info(op->args[1])->s_mask;
2036
2037 /* Because of fold_to_not, we want to always return true, via finish. */
2038 finish_folding(ctx, op);
2039 return true;
2040 }
2041
2042 static bool fold_or(OptContext *ctx, TCGOp *op)
2043 {
2044 if (fold_const2_commutative(ctx, op) ||
2045 fold_xi_to_x(ctx, op, 0) ||
2046 fold_xx_to_x(ctx, op)) {
2047 return true;
2048 }
2049
2050 ctx->z_mask = arg_info(op->args[1])->z_mask
2051 | arg_info(op->args[2])->z_mask;
2052 ctx->s_mask = arg_info(op->args[1])->s_mask
2053 & arg_info(op->args[2])->s_mask;
2054 return fold_masks(ctx, op);
2055 }
2056
2057 static bool fold_orc(OptContext *ctx, TCGOp *op)
2058 {
2059 if (fold_const2(ctx, op) ||
2060 fold_xx_to_i(ctx, op, -1) ||
2061 fold_xi_to_x(ctx, op, -1) ||
2062 fold_ix_to_not(ctx, op, 0)) {
2063 return true;
2064 }
2065
2066 ctx->s_mask = arg_info(op->args[1])->s_mask
2067 & arg_info(op->args[2])->s_mask;
2068 return false;
2069 }
2070
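/*
 * For guest memory loads, the MemOp tells us the loaded width and
 * signedness.  A load narrower than 64 bits has its high bits filled
 * with copies of the sign bit; for an unsigned load those high bits
 * are furthermore known to be zero.
 */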
2071 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
2072 {
2073 const TCGOpDef *def = &tcg_op_defs[op->opc];
2074 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2075 MemOp mop = get_memop(oi);
2076 int width = 8 * memop_size(mop);
2077
2078 if (width < 64) {
2079 ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
2080 if (!(mop & MO_SIGN)) {
2081 ctx->z_mask = MAKE_64BIT_MASK(0, width);
2082 ctx->s_mask <<= 1;
2083 }
2084 }
2085
2086 /* Opcodes that touch guest memory stop the mb optimization. */
2087 ctx->prev_mb = NULL;
2088 return false;
2089 }
2090
2091 static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2092 {
2093 /* Opcodes that touch guest memory stop the mb optimization. */
2094 ctx->prev_mb = NULL;
2095 return false;
2096 }
2097
2098 static bool fold_remainder(OptContext *ctx, TCGOp *op)
2099 {
2100 if (fold_const2(ctx, op) ||
2101 fold_xx_to_i(ctx, op, 0)) {
2102 return true;
2103 }
2104 return false;
2105 }
2106
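/*
 * Try to simplify setcond/negsetcond using the known-zero mask of the
 * first operand: if the largest value it can take already decides the
 * comparison, replace the op with the constant result; if the operand
 * is already a boolean (only bit 0 may be set), reduce the setcond to
 * a mov, neg, xor or add of that operand.
 */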
2107 static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
2108 {
2109 uint64_t a_zmask, b_val;
2110 TCGCond cond;
2111
2112 if (!arg_is_const(op->args[2])) {
2113 return false;
2114 }
2115
2116 a_zmask = arg_info(op->args[1])->z_mask;
2117 b_val = arg_info(op->args[2])->val;
2118 cond = op->args[3];
2119
2120 if (ctx->type == TCG_TYPE_I32) {
2121 a_zmask = (uint32_t)a_zmask;
2122 b_val = (uint32_t)b_val;
2123 }
2124
2125 /*
2126 * The largest value A can take is a_zmask itself; if that is still below B, then A < B always.
2127 */
2128 if (a_zmask < b_val) {
2129 bool inv = false;
2130
2131 switch (cond) {
2132 case TCG_COND_NE:
2133 case TCG_COND_LEU:
2134 case TCG_COND_LTU:
2135 inv = true;
2136 /* fall through */
2137 case TCG_COND_GTU:
2138 case TCG_COND_GEU:
2139 case TCG_COND_EQ:
2140 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2141 default:
2142 break;
2143 }
2144 }
2145
2146 /*
2147 * If only the lsb of A can be set, A is already a boolean (0 or 1).
2148 */
2149 if (a_zmask <= 1) {
2150 bool convert = false;
2151 bool inv = false;
2152
2153 switch (cond) {
2154 case TCG_COND_EQ:
2155 inv = true;
2156 /* fall through */
2157 case TCG_COND_NE:
2158 convert = (b_val == 0);
2159 break;
2160 case TCG_COND_LTU:
2161 case TCG_COND_TSTEQ:
2162 inv = true;
2163 /* fall through */
2164 case TCG_COND_GEU:
2165 case TCG_COND_TSTNE:
2166 convert = (b_val == 1);
2167 break;
2168 default:
2169 break;
2170 }
2171 if (convert) {
2172 TCGOpcode add_opc, xor_opc, neg_opc;
2173
2174 if (!inv && !neg) {
2175 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2176 }
2177
2178 switch (ctx->type) {
2179 case TCG_TYPE_I32:
2180 add_opc = INDEX_op_add_i32;
2181 neg_opc = INDEX_op_neg_i32;
2182 xor_opc = INDEX_op_xor_i32;
2183 break;
2184 case TCG_TYPE_I64:
2185 add_opc = INDEX_op_add_i64;
2186 neg_opc = INDEX_op_neg_i64;
2187 xor_opc = INDEX_op_xor_i64;
2188 break;
2189 default:
2190 g_assert_not_reached();
2191 }
2192
2193 if (!inv) {
2194 op->opc = neg_opc;
2195 } else if (neg) {
2196 op->opc = add_opc;
2197 op->args[2] = arg_new_constant(ctx, -1);
2198 } else {
2199 op->opc = xor_opc;
2200 op->args[2] = arg_new_constant(ctx, 1);
2201 }
2202 return false;
2203 }
2204 }
2205
2206 return false;
2207 }
2208
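/*
 * For TSTNE/TSTEQ against a power-of-two constant, the result depends
 * on a single bit of the input.  Extract that bit directly with
 * (s)extract when available, or with shr+and otherwise, then fix up
 * TSTEQ and/or a negated result with xor, sub or neg.
 */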
2209 static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2210 {
2211 TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc;
2212 TCGOpcode uext_opc = 0, sext_opc = 0;
2213 TCGCond cond = op->args[3];
2214 TCGArg ret, src1, src2;
2215 TCGOp *op2;
2216 uint64_t val;
2217 int sh;
2218 bool inv;
2219
2220 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2221 return;
2222 }
2223
2224 src2 = op->args[2];
2225 val = arg_info(src2)->val;
2226 if (!is_power_of_2(val)) {
2227 return;
2228 }
2229 sh = ctz64(val);
2230
2231 switch (ctx->type) {
2232 case TCG_TYPE_I32:
2233 and_opc = INDEX_op_and_i32;
2234 sub_opc = INDEX_op_sub_i32;
2235 xor_opc = INDEX_op_xor_i32;
2236 shr_opc = INDEX_op_shr_i32;
2237 neg_opc = INDEX_op_neg_i32;
2238 if (TCG_TARGET_extract_i32_valid(sh, 1)) {
2239 uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
2240 sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
2241 }
2242 break;
2243 case TCG_TYPE_I64:
2244 and_opc = INDEX_op_and_i64;
2245 sub_opc = INDEX_op_sub_i64;
2246 xor_opc = INDEX_op_xor_i64;
2247 shr_opc = INDEX_op_shr_i64;
2248 neg_opc = INDEX_op_neg_i64;
2249 if (TCG_TARGET_extract_i64_valid(sh, 1)) {
2250 uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
2251 sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
2252 }
2253 break;
2254 default:
2255 g_assert_not_reached();
2256 }
2257
2258 ret = op->args[0];
2259 src1 = op->args[1];
2260 inv = cond == TCG_COND_TSTEQ;
2261
2262 if (sh && sext_opc && neg && !inv) {
2263 op->opc = sext_opc;
2264 op->args[1] = src1;
2265 op->args[2] = sh;
2266 op->args[3] = 1;
2267 return;
2268 } else if (sh && uext_opc) {
2269 op->opc = uext_opc;
2270 op->args[1] = src1;
2271 op->args[2] = sh;
2272 op->args[3] = 1;
2273 } else {
2274 if (sh) {
2275 op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
2276 op2->args[0] = ret;
2277 op2->args[1] = src1;
2278 op2->args[2] = arg_new_constant(ctx, sh);
2279 src1 = ret;
2280 }
2281 op->opc = and_opc;
2282 op->args[1] = src1;
2283 op->args[2] = arg_new_constant(ctx, 1);
2284 }
2285
2286 if (neg && inv) {
2287 op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
2288 op2->args[0] = ret;
2289 op2->args[1] = ret;
2290 op2->args[2] = arg_new_constant(ctx, 1);
2291 } else if (inv) {
2292 op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
2293 op2->args[0] = ret;
2294 op2->args[1] = ret;
2295 op2->args[2] = arg_new_constant(ctx, 1);
2296 } else if (neg) {
2297 op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
2298 op2->args[0] = ret;
2299 op2->args[1] = ret;
2300 }
2301 }
2302
2303 static bool fold_setcond(OptContext *ctx, TCGOp *op)
2304 {
2305 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
2306 &op->args[2], &op->args[3]);
2307 if (i >= 0) {
2308 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2309 }
2310
2311 if (fold_setcond_zmask(ctx, op, false)) {
2312 return true;
2313 }
2314 fold_setcond_tst_pow2(ctx, op, false);
2315
2316 ctx->z_mask = 1;
2317 ctx->s_mask = smask_from_zmask(1);
2318 return false;
2319 }
2320
2321 static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2322 {
2323 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
2324 &op->args[2], &op->args[3]);
2325 if (i >= 0) {
2326 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2327 }
2328
2329 if (fold_setcond_zmask(ctx, op, true)) {
2330 return true;
2331 }
2332 fold_setcond_tst_pow2(ctx, op, true);
2333
2334 /* Value is {0,-1} so all bits are repetitions of the sign. */
2335 ctx->s_mask = -1;
2336 return false;
2337 }
2338
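/*
 * setcond2 compares a 64-bit value split into two 32-bit halves.  When
 * one half of the comparison can be decided from constants, the whole
 * operation reduces to a setcond_i32 on the remaining half, or to a
 * constant result.
 */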
2339 static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2340 {
2341 TCGCond cond;
2342 int i, inv = 0;
2343
2344 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
2345 cond = op->args[5];
2346 if (i >= 0) {
2347 goto do_setcond_const;
2348 }
2349
2350 switch (cond) {
2351 case TCG_COND_LT:
2352 case TCG_COND_GE:
2353 /*
2354 * Simplify LT/GE comparisons vs zero to a single compare
2355 * vs the high word of the input.
2356 */
2357 if (arg_is_const_val(op->args[3], 0) &&
2358 arg_is_const_val(op->args[4], 0)) {
2359 goto do_setcond_high;
2360 }
2361 break;
2362
2363 case TCG_COND_NE:
2364 inv = 1;
2365 QEMU_FALLTHROUGH;
2366 case TCG_COND_EQ:
2367 /*
2368 * Simplify EQ/NE comparisons where one of the pairs
2369 * can be simplified.
2370 */
2371 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
2372 op->args[3], cond);
2373 switch (i ^ inv) {
2374 case 0:
2375 goto do_setcond_const;
2376 case 1:
2377 goto do_setcond_high;
2378 }
2379
2380 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
2381 op->args[4], cond);
2382 switch (i ^ inv) {
2383 case 0:
2384 goto do_setcond_const;
2385 case 1:
2386 goto do_setcond_low;
2387 }
2388 break;
2389
2390 case TCG_COND_TSTEQ:
2391 case TCG_COND_TSTNE:
2392 if (arg_is_const_val(op->args[3], 0)) {
2393 goto do_setcond_high;
2394 }
2395 if (arg_is_const_val(op->args[4], 0)) {
2396 goto do_setcond_low;
2397 }
2398 break;
2399
2400 default:
2401 break;
2402
2403 do_setcond_low:
2404 op->args[2] = op->args[3];
2405 op->args[3] = cond;
2406 op->opc = INDEX_op_setcond_i32;
2407 return fold_setcond(ctx, op);
2408
2409 do_setcond_high:
2410 op->args[1] = op->args[2];
2411 op->args[2] = op->args[4];
2412 op->args[3] = cond;
2413 op->opc = INDEX_op_setcond_i32;
2414 return fold_setcond(ctx, op);
2415 }
2416
2417 ctx->z_mask = 1;
2418 ctx->s_mask = smask_from_zmask(1);
2419 return false;
2420
2421 do_setcond_const:
2422 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2423 }
2424
2425 static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
2426 {
2427 /* Canonicalize the comparison to put immediate second. */
2428 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
2429 op->args[3] = tcg_swap_cond(op->args[3]);
2430 }
2431 return false;
2432 }
2433
2434 static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
2435 {
2436 /* If true and false values are the same, eliminate the cmp. */
2437 if (args_are_copies(op->args[3], op->args[4])) {
2438 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
2439 }
2440
2441 /* Canonicalize the comparison to put immediate second. */
2442 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
2443 op->args[5] = tcg_swap_cond(op->args[5]);
2444 }
2445 /*
2446 * Canonicalize the "false" input reg to match the destination,
2447 * so that the tcg backend can implement "move if true".
2448 */
2449 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
2450 op->args[5] = tcg_invert_cond(op->args[5]);
2451 }
2452 return false;
2453 }
2454
2455 static bool fold_sextract(OptContext *ctx, TCGOp *op)
2456 {
2457 uint64_t z_mask, s_mask, s_mask_old;
2458 int pos = op->args[2];
2459 int len = op->args[3];
2460
2461 if (arg_is_const(op->args[1])) {
2462 uint64_t t;
2463
2464 t = arg_info(op->args[1])->val;
2465 t = sextract64(t, pos, len);
2466 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
2467 }
2468
2469 z_mask = arg_info(op->args[1])->z_mask;
2470 z_mask = sextract64(z_mask, pos, len);
2471 ctx->z_mask = z_mask;
2472
2473 s_mask_old = arg_info(op->args[1])->s_mask;
2474 s_mask = sextract64(s_mask_old, pos, len);
2475 s_mask |= MAKE_64BIT_MASK(len, 64 - len);
2476 ctx->s_mask = s_mask;
2477
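/*
 * A sign extract starting at bit 0 is a plain sign extension.  The only
 * bits that may differ from the input are those newly guaranteed to be
 * sign repetitions (set in s_mask but not in s_mask_old); if there are
 * none, the op is a no-op and can be replaced by a copy of the input.
 */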
2478 if (pos == 0) {
2479 ctx->a_mask = s_mask & ~s_mask_old;
2480 }
2481
2482 return fold_masks(ctx, op);
2483 }
2484
2485 static bool fold_shift(OptContext *ctx, TCGOp *op)
2486 {
2487 uint64_t s_mask, z_mask, sign;
2488
2489 if (fold_const2(ctx, op) ||
2490 fold_ix_to_i(ctx, op, 0) ||
2491 fold_xi_to_x(ctx, op, 0)) {
2492 return true;
2493 }
2494
2495 s_mask = arg_info(op->args[1])->s_mask;
2496 z_mask = arg_info(op->args[1])->z_mask;
2497
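/*
 * With a constant shift count, the known-zero mask can be shifted the
 * same way as the value to describe the result; the sign-repetition
 * mask is shifted likewise and then re-aligned to the left.
 */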
2498 if (arg_is_const(op->args[2])) {
2499 int sh = arg_info(op->args[2])->val;
2500
2501 ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
2502
2503 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
2504 ctx->s_mask = smask_from_smask(s_mask);
2505
2506 return fold_masks(ctx, op);
2507 }
2508
2509 switch (op->opc) {
2510 CASE_OP_32_64(sar):
2511 /*
2512 * Arithmetic right shift will not reduce the number of
2513 * input sign repetitions.
2514 */
2515 ctx->s_mask = s_mask;
2516 break;
2517 CASE_OP_32_64(shr):
2518 /*
2519 * If the sign bit is known zero, then logical right shift
2520 * will not reduce the number of input sign repetitions.
2521 */
2522 sign = (s_mask & -s_mask) >> 1;
2523 if (sign && !(z_mask & sign)) {
2524 ctx->s_mask = s_mask;
2525 }
2526 break;
2527 default:
2528 break;
2529 }
2530
2531 return false;
2532 }
2533
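/* Fold sub r,0,x to neg r,x when a negate opcode exists for this type. */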
2534 static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2535 {
2536 TCGOpcode neg_op;
2537 bool have_neg;
2538
2539 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
2540 return false;
2541 }
2542
2543 switch (ctx->type) {
2544 case TCG_TYPE_I32:
2545 neg_op = INDEX_op_neg_i32;
2546 have_neg = true;
2547 break;
2548 case TCG_TYPE_I64:
2549 neg_op = INDEX_op_neg_i64;
2550 have_neg = true;
2551 break;
2552 case TCG_TYPE_V64:
2553 case TCG_TYPE_V128:
2554 case TCG_TYPE_V256:
2555 neg_op = INDEX_op_neg_vec;
2556 have_neg = (TCG_TARGET_HAS_neg_vec &&
2557 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2558 break;
2559 default:
2560 g_assert_not_reached();
2561 }
2562 if (have_neg) {
2563 op->opc = neg_op;
2564 op->args[1] = op->args[2];
2565 return fold_neg_no_const(ctx, op);
2566 }
2567 return false;
2568 }
2569
2570 /* We cannot as yet do_constant_folding with vectors. */
2571 static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
2572 {
2573 if (fold_xx_to_i(ctx, op, 0) ||
2574 fold_xi_to_x(ctx, op, 0) ||
2575 fold_sub_to_neg(ctx, op)) {
2576 return true;
2577 }
2578 return false;
2579 }
2580
2581 static bool fold_sub(OptContext *ctx, TCGOp *op)
2582 {
2583 if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
2584 return true;
2585 }
2586
2587 /* Fold sub r,x,i to add r,x,-i */
2588 if (arg_is_const(op->args[2])) {
2589 uint64_t val = arg_info(op->args[2])->val;
2590
2591 op->opc = (ctx->type == TCG_TYPE_I32
2592 ? INDEX_op_add_i32 : INDEX_op_add_i64);
2593 op->args[2] = arg_new_constant(ctx, -val);
2594 }
2595 return false;
2596 }
2597
2598 static bool fold_sub2(OptContext *ctx, TCGOp *op)
2599 {
2600 return fold_addsub2(ctx, op, false);
2601 }
2602
2603 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
2604 {
2605 /* We can't do any folding with a load, but we can record bits. */
2606 switch (op->opc) {
2607 CASE_OP_32_64(ld8s):
2608 ctx->s_mask = MAKE_64BIT_MASK(8, 56);
2609 break;
2610 CASE_OP_32_64(ld8u):
2611 ctx->z_mask = MAKE_64BIT_MASK(0, 8);
2612 ctx->s_mask = MAKE_64BIT_MASK(9, 55);
2613 break;
2614 CASE_OP_32_64(ld16s):
2615 ctx->s_mask = MAKE_64BIT_MASK(16, 48);
2616 break;
2617 CASE_OP_32_64(ld16u):
2618 ctx->z_mask = MAKE_64BIT_MASK(0, 16);
2619 ctx->s_mask = MAKE_64BIT_MASK(17, 47);
2620 break;
2621 case INDEX_op_ld32s_i64:
2622 ctx->s_mask = MAKE_64BIT_MASK(32, 32);
2623 break;
2624 case INDEX_op_ld32u_i64:
2625 ctx->z_mask = MAKE_64BIT_MASK(0, 32);
2626 ctx->s_mask = MAKE_64BIT_MASK(33, 31);
2627 break;
2628 default:
2629 g_assert_not_reached();
2630 }
2631 return false;
2632 }
2633
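/*
 * A load from a fixed offset within env may be satisfied by a temp that
 * was previously recorded as holding a copy of that slot; otherwise
 * record the loaded temp as the current copy of the slot.
 */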
2634 static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2635 {
2636 TCGTemp *dst, *src;
2637 intptr_t ofs;
2638 TCGType type;
2639
2640 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2641 return false;
2642 }
2643
2644 type = ctx->type;
2645 ofs = op->args[2];
2646 dst = arg_temp(op->args[0]);
2647 src = find_mem_copy_for(ctx, type, ofs);
2648 if (src && src->base_type == type) {
2649 return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
2650 }
2651
2652 reset_ts(ctx, dst);
2653 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
2654 return true;
2655 }
2656
2657 static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
2658 {
2659 intptr_t ofs = op->args[2];
2660 intptr_t lm1;
2661
2662 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2663 remove_mem_copy_all(ctx);
2664 return false;
2665 }
2666
2667 switch (op->opc) {
2668 CASE_OP_32_64(st8):
2669 lm1 = 0;
2670 break;
2671 CASE_OP_32_64(st16):
2672 lm1 = 1;
2673 break;
2674 case INDEX_op_st32_i64:
2675 case INDEX_op_st_i32:
2676 lm1 = 3;
2677 break;
2678 case INDEX_op_st_i64:
2679 lm1 = 7;
2680 break;
2681 case INDEX_op_st_vec:
2682 lm1 = tcg_type_size(ctx->type) - 1;
2683 break;
2684 default:
2685 g_assert_not_reached();
2686 }
2687 remove_mem_copy_in(ctx, ofs, ofs + lm1);
2688 return false;
2689 }
2690
2691 static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
2692 {
2693 TCGTemp *src;
2694 intptr_t ofs, last;
2695 TCGType type;
2696
2697 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2698 fold_tcg_st(ctx, op);
2699 return false;
2700 }
2701
2702 src = arg_temp(op->args[0]);
2703 ofs = op->args[2];
2704 type = ctx->type;
2705
2706 /*
2707 * Eliminate duplicate stores of a constant.
2708 * This happens frequently when the target ISA zero-extends.
2709 */
2710 if (ts_is_const(src)) {
2711 TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
2712 if (src == prev) {
2713 tcg_op_remove(ctx->tcg, op);
2714 return true;
2715 }
2716 }
2717
2718 last = ofs + tcg_type_size(type) - 1;
2719 remove_mem_copy_in(ctx, ofs, last);
2720 record_mem_copy(ctx, type, src, ofs, last);
2721 return false;
2722 }
2723
2724 static bool fold_xor(OptContext *ctx, TCGOp *op)
2725 {
2726 if (fold_const2_commutative(ctx, op) ||
2727 fold_xx_to_i(ctx, op, 0) ||
2728 fold_xi_to_x(ctx, op, 0) ||
2729 fold_xi_to_not(ctx, op, -1)) {
2730 return true;
2731 }
2732
2733 ctx->z_mask = arg_info(op->args[1])->z_mask
2734 | arg_info(op->args[2])->z_mask;
2735 ctx->s_mask = arg_info(op->args[1])->s_mask
2736 & arg_info(op->args[2])->s_mask;
2737 return fold_masks(ctx, op);
2738 }
2739
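/*
 * bitsel_vec d,a,b,c computes (b & a) | (c & ~a).  With a constant b
 * and/or c the operation degenerates to a mov, not, or, andc, and,
 * or orc.
 */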
2740 static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
2741 {
2742 /* If true and false values are the same, eliminate the cmp. */
2743 if (args_are_copies(op->args[2], op->args[3])) {
2744 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
2745 }
2746
2747 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
2748 uint64_t tv = arg_info(op->args[2])->val;
2749 uint64_t fv = arg_info(op->args[3])->val;
2750
2751 if (tv == -1 && fv == 0) {
2752 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2753 }
2754 if (tv == 0 && fv == -1) {
2755 if (TCG_TARGET_HAS_not_vec) {
2756 op->opc = INDEX_op_not_vec;
2757 return fold_not(ctx, op);
2758 } else {
2759 op->opc = INDEX_op_xor_vec;
2760 op->args[2] = arg_new_constant(ctx, -1);
2761 return fold_xor(ctx, op);
2762 }
2763 }
2764 }
2765 if (arg_is_const(op->args[2])) {
2766 uint64_t tv = arg_info(op->args[2])->val;
2767 if (tv == -1) {
2768 op->opc = INDEX_op_or_vec;
2769 op->args[2] = op->args[3];
2770 return fold_or(ctx, op);
2771 }
2772 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
2773 op->opc = INDEX_op_andc_vec;
2774 op->args[2] = op->args[1];
2775 op->args[1] = op->args[3];
2776 return fold_andc(ctx, op);
2777 }
2778 }
2779 if (arg_is_const(op->args[3])) {
2780 uint64_t fv = arg_info(op->args[3])->val;
2781 if (fv == 0) {
2782 op->opc = INDEX_op_and_vec;
2783 return fold_and(ctx, op);
2784 }
2785 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
op->opc = INDEX_op_orc_vec;
/* (b & a) | (-1 & ~a) == b | ~a: orc of the true value and the selector. */
op->args[3] = op->args[1];
op->args[1] = op->args[2];
op->args[2] = op->args[3];
2789 return fold_orc(ctx, op);
2790 }
2791 }
2792 return false;
2793 }
2794
2795 /* Propagate constants and copies, fold constant expressions. */
2796 void tcg_optimize(TCGContext *s)
2797 {
2798 int nb_temps, i;
2799 TCGOp *op, *op_next;
2800 OptContext ctx = { .tcg = s };
2801
2802 QSIMPLEQ_INIT(&ctx.mem_free);
2803
2804 /* Each temp has a TempOptInfo hanging off its state_ptr.  If the temp
2805 holds a constant, its value is recorded there.  If the temp is a copy
2806 of other temps, the other copies are reachable through the doubly
2807 linked circular list. */
2808
2809 nb_temps = s->nb_temps;
2810 for (i = 0; i < nb_temps; ++i) {
2811 s->temps[i].state_ptr = NULL;
2812 }
2813
2814 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2815 TCGOpcode opc = op->opc;
2816 const TCGOpDef *def;
2817 bool done = false;
2818
2819 /* Calls are special. */
2820 if (opc == INDEX_op_call) {
2821 fold_call(&ctx, op);
2822 continue;
2823 }
2824
2825 def = &tcg_op_defs[opc];
2826 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
2827 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
2828
2829 /* Pre-compute the type of the operation. */
2830 if (def->flags & TCG_OPF_VECTOR) {
2831 ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
2832 } else if (def->flags & TCG_OPF_64BIT) {
2833 ctx.type = TCG_TYPE_I64;
2834 } else {
2835 ctx.type = TCG_TYPE_I32;
2836 }
2837
2838 /* Assume all bits affected, no bits known zero, no sign reps. */
2839 ctx.a_mask = -1;
2840 ctx.z_mask = -1;
2841 ctx.s_mask = 0;
2842
2843 /*
2844 * Process each opcode.
2845 * Sorted alphabetically by opcode as much as possible.
2846 */
2847 switch (opc) {
2848 CASE_OP_32_64(add):
2849 done = fold_add(&ctx, op);
2850 break;
2851 case INDEX_op_add_vec:
2852 done = fold_add_vec(&ctx, op);
2853 break;
2854 CASE_OP_32_64(add2):
2855 done = fold_add2(&ctx, op);
2856 break;
2857 CASE_OP_32_64_VEC(and):
2858 done = fold_and(&ctx, op);
2859 break;
2860 CASE_OP_32_64_VEC(andc):
2861 done = fold_andc(&ctx, op);
2862 break;
2863 CASE_OP_32_64(brcond):
2864 done = fold_brcond(&ctx, op);
2865 break;
2866 case INDEX_op_brcond2_i32:
2867 done = fold_brcond2(&ctx, op);
2868 break;
2869 CASE_OP_32_64(bswap16):
2870 CASE_OP_32_64(bswap32):
2871 case INDEX_op_bswap64_i64:
2872 done = fold_bswap(&ctx, op);
2873 break;
2874 CASE_OP_32_64(clz):
2875 CASE_OP_32_64(ctz):
2876 done = fold_count_zeros(&ctx, op);
2877 break;
2878 CASE_OP_32_64(ctpop):
2879 done = fold_ctpop(&ctx, op);
2880 break;
2881 CASE_OP_32_64(deposit):
2882 done = fold_deposit(&ctx, op);
2883 break;
2884 CASE_OP_32_64(div):
2885 CASE_OP_32_64(divu):
2886 done = fold_divide(&ctx, op);
2887 break;
2888 case INDEX_op_dup_vec:
2889 done = fold_dup(&ctx, op);
2890 break;
2891 case INDEX_op_dup2_vec:
2892 done = fold_dup2(&ctx, op);
2893 break;
2894 CASE_OP_32_64_VEC(eqv):
2895 done = fold_eqv(&ctx, op);
2896 break;
2897 CASE_OP_32_64(extract):
2898 done = fold_extract(&ctx, op);
2899 break;
2900 CASE_OP_32_64(extract2):
2901 done = fold_extract2(&ctx, op);
2902 break;
2903 CASE_OP_32_64(ext8s):
2904 CASE_OP_32_64(ext16s):
2905 case INDEX_op_ext32s_i64:
2906 case INDEX_op_ext_i32_i64:
2907 done = fold_exts(&ctx, op);
2908 break;
2909 CASE_OP_32_64(ext8u):
2910 CASE_OP_32_64(ext16u):
2911 case INDEX_op_ext32u_i64:
2912 case INDEX_op_extu_i32_i64:
2913 case INDEX_op_extrl_i64_i32:
2914 case INDEX_op_extrh_i64_i32:
2915 done = fold_extu(&ctx, op);
2916 break;
2917 CASE_OP_32_64(ld8s):
2918 CASE_OP_32_64(ld8u):
2919 CASE_OP_32_64(ld16s):
2920 CASE_OP_32_64(ld16u):
2921 case INDEX_op_ld32s_i64:
2922 case INDEX_op_ld32u_i64:
2923 done = fold_tcg_ld(&ctx, op);
2924 break;
2925 case INDEX_op_ld_i32:
2926 case INDEX_op_ld_i64:
2927 case INDEX_op_ld_vec:
2928 done = fold_tcg_ld_memcopy(&ctx, op);
2929 break;
2930 CASE_OP_32_64(st8):
2931 CASE_OP_32_64(st16):
2932 case INDEX_op_st32_i64:
2933 done = fold_tcg_st(&ctx, op);
2934 break;
2935 case INDEX_op_st_i32:
2936 case INDEX_op_st_i64:
2937 case INDEX_op_st_vec:
2938 done = fold_tcg_st_memcopy(&ctx, op);
2939 break;
2940 case INDEX_op_mb:
2941 done = fold_mb(&ctx, op);
2942 break;
2943 CASE_OP_32_64_VEC(mov):
2944 done = fold_mov(&ctx, op);
2945 break;
2946 CASE_OP_32_64(movcond):
2947 done = fold_movcond(&ctx, op);
2948 break;
2949 CASE_OP_32_64(mul):
2950 done = fold_mul(&ctx, op);
2951 break;
2952 CASE_OP_32_64(mulsh):
2953 CASE_OP_32_64(muluh):
2954 done = fold_mul_highpart(&ctx, op);
2955 break;
2956 CASE_OP_32_64(muls2):
2957 CASE_OP_32_64(mulu2):
2958 done = fold_multiply2(&ctx, op);
2959 break;
2960 CASE_OP_32_64_VEC(nand):
2961 done = fold_nand(&ctx, op);
2962 break;
2963 CASE_OP_32_64(neg):
2964 done = fold_neg(&ctx, op);
2965 break;
2966 CASE_OP_32_64_VEC(nor):
2967 done = fold_nor(&ctx, op);
2968 break;
2969 CASE_OP_32_64_VEC(not):
2970 done = fold_not(&ctx, op);
2971 break;
2972 CASE_OP_32_64_VEC(or):
2973 done = fold_or(&ctx, op);
2974 break;
2975 CASE_OP_32_64_VEC(orc):
2976 done = fold_orc(&ctx, op);
2977 break;
2978 case INDEX_op_qemu_ld_a32_i32:
2979 case INDEX_op_qemu_ld_a64_i32:
2980 case INDEX_op_qemu_ld_a32_i64:
2981 case INDEX_op_qemu_ld_a64_i64:
2982 case INDEX_op_qemu_ld_a32_i128:
2983 case INDEX_op_qemu_ld_a64_i128:
2984 done = fold_qemu_ld(&ctx, op);
2985 break;
2986 case INDEX_op_qemu_st8_a32_i32:
2987 case INDEX_op_qemu_st8_a64_i32:
2988 case INDEX_op_qemu_st_a32_i32:
2989 case INDEX_op_qemu_st_a64_i32:
2990 case INDEX_op_qemu_st_a32_i64:
2991 case INDEX_op_qemu_st_a64_i64:
2992 case INDEX_op_qemu_st_a32_i128:
2993 case INDEX_op_qemu_st_a64_i128:
2994 done = fold_qemu_st(&ctx, op);
2995 break;
2996 CASE_OP_32_64(rem):
2997 CASE_OP_32_64(remu):
2998 done = fold_remainder(&ctx, op);
2999 break;
3000 CASE_OP_32_64(rotl):
3001 CASE_OP_32_64(rotr):
3002 CASE_OP_32_64(sar):
3003 CASE_OP_32_64(shl):
3004 CASE_OP_32_64(shr):
3005 done = fold_shift(&ctx, op);
3006 break;
3007 CASE_OP_32_64(setcond):
3008 done = fold_setcond(&ctx, op);
3009 break;
3010 CASE_OP_32_64(negsetcond):
3011 done = fold_negsetcond(&ctx, op);
3012 break;
3013 case INDEX_op_setcond2_i32:
3014 done = fold_setcond2(&ctx, op);
3015 break;
3016 case INDEX_op_cmp_vec:
3017 done = fold_cmp_vec(&ctx, op);
3018 break;
3019 case INDEX_op_cmpsel_vec:
3020 done = fold_cmpsel_vec(&ctx, op);
3021 break;
3022 case INDEX_op_bitsel_vec:
3023 done = fold_bitsel_vec(&ctx, op);
3024 break;
3025 CASE_OP_32_64(sextract):
3026 done = fold_sextract(&ctx, op);
3027 break;
3028 CASE_OP_32_64(sub):
3029 done = fold_sub(&ctx, op);
3030 break;
3031 case INDEX_op_sub_vec:
3032 done = fold_sub_vec(&ctx, op);
3033 break;
3034 CASE_OP_32_64(sub2):
3035 done = fold_sub2(&ctx, op);
3036 break;
3037 CASE_OP_32_64_VEC(xor):
3038 done = fold_xor(&ctx, op);
3039 break;
3040 default:
3041 break;
3042 }
3043
3044 if (!done) {
3045 finish_folding(&ctx, op);
3046 }
3047 }
3048 }
3049