
add_i32 t0, t1, t2    /* (t0 <- t1 + t2) */
add_i32 t0, t1, t2
* - brcond_i32/i64 *t0*, *t1*, *cond*, *label*
- | Conditional jump if *t0* *cond* *t1* is true. *cond* can be:
| ``TCG_COND_TSTEQ /* t1 & t2 == 0 */``
| ``TCG_COND_TSTNE /* t1 & t2 != 0 */``
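The two "test" conditions compare the bitwise AND of the operands against
zero. As a reading aid, here is a minimal C sketch of these predicates on
32-bit values; the helper names are illustrative and not part of the TCG API::

    #include <stdbool.h>
    #include <stdint.h>

    /* TCG_COND_TSTEQ: true when the operands have no set bits in common. */
    static bool cond_tsteq_i32(uint32_t a, uint32_t b)
    {
        return (a & b) == 0;
    }

    /* TCG_COND_TSTNE: true when the operands share at least one set bit. */
    static bool cond_tstne_i32(uint32_t a, uint32_t b)
    {
        return (a & b) != 0;
    }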
* - add_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* + *t2*
* - sub_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* - *t2*
* - neg_i32/i64 *t0*, *t1*
- | *t0* = -*t1* (two's complement)
* - mul_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* * *t2*
* - div_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* / *t2* (signed)
* - divu_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* / *t2* (unsigned)
* - rem_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* % *t2* (signed)
* - remu_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* % *t2* (unsigned)
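The division and remainder ops come in signed/unsigned pairs that differ only
in how the operands are interpreted. A minimal C sketch of the intended
results for the 32-bit forms (the function names are illustrative; division by
zero and the INT32_MIN / -1 overflow case are not addressed here)::

    #include <stdint.h>

    static int32_t  ref_div_i32 (int32_t t1,  int32_t t2)  { return t1 / t2; }
    static uint32_t ref_divu_i32(uint32_t t1, uint32_t t2) { return t1 / t2; }
    static int32_t  ref_rem_i32 (int32_t t1,  int32_t t2)  { return t1 % t2; }
    static uint32_t ref_remu_i32(uint32_t t1, uint32_t t2) { return t1 % t2; }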
* - and_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* & *t2*
* - or_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* | *t2*
* - xor_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* ^ *t2*
* - not_i32/i64 *t0*, *t1*
- | *t0* = ~\ *t1*
* - andc_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* & ~\ *t2*
* - eqv_i32/i64 *t0*, *t1*, *t2*
- | *t0* = ~(*t1* ^ *t2*), or equivalently, *t0* = *t1* ^ ~\ *t2*
* - nand_i32/i64 *t0*, *t1*, *t2*
- | *t0* = ~(*t1* & *t2*)
* - nor_i32/i64 *t0*, *t1*, *t2*
- | *t0* = ~(*t1* | *t2*)
* - orc_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* | ~\ *t2*
* - clz_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* ? clz(*t1*) : *t2*
* - ctz_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* ? ctz(*t1*) : *t2*
* - ctpop_i32/i64 *t0*, *t1*
- | *t0* = number of bits set in *t1*
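For clz/ctz the third operand supplies the result when *t1* is zero, which is
exactly the case the common compiler builtins leave undefined. A C sketch of
the 32-bit count ops, assuming a GCC/Clang host compiler for the builtins::

    #include <stdint.h>

    static uint32_t ref_clz_i32(uint32_t t1, uint32_t t2)
    {
        return t1 ? (uint32_t)__builtin_clz(t1) : t2;   /* leading zeros */
    }

    static uint32_t ref_ctz_i32(uint32_t t1, uint32_t t2)
    {
        return t1 ? (uint32_t)__builtin_ctz(t1) : t2;   /* trailing zeros */
    }

    static uint32_t ref_ctpop_i32(uint32_t t1)
    {
        return (uint32_t)__builtin_popcount(t1);        /* bits set */
    }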
* - shl_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* << *t2*
* - shr_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* >> *t2* (unsigned)
* - sar_i32/i64 *t0*, *t1*, *t2*
- | *t0* = *t1* >> *t2* (signed)
* - rotl_i32/i64 *t0*, *t1*, *t2*
* - rotr_i32/i64 *t0*, *t1*, *t2*
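The rotates have no direct C operator. A reference sketch of the 32-bit forms,
with the count reduced modulo 32 here so that the C shifts stay well defined::

    #include <stdint.h>

    static uint32_t ref_rotl_i32(uint32_t t1, uint32_t t2)
    {
        t2 &= 31;
        return t2 ? (t1 << t2) | (t1 >> (32 - t2)) : t1;
    }

    static uint32_t ref_rotr_i32(uint32_t t1, uint32_t t2)
    {
        t2 &= 31;
        return t2 ? (t1 >> t2) | (t1 << (32 - t2)) : t1;
    }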
* - mov_i32/i64 *t0*, *t1*
- | *t0* = *t1*
| Move *t1* to *t0* (both operands must have the same type).
* - ext8s_i32/i64 *t0*, *t1*
    ext8u_i32/i64 *t0*, *t1*
    ext16s_i32/i64 *t0*, *t1*
    ext16u_i32/i64 *t0*, *t1*
    ext32s_i64 *t0*, *t1*
    ext32u_i64 *t0*, *t1*
* - bswap16_i32/i64 *t0*, *t1*, *flags*
| If *flags* & ``TCG_BSWAP_IZ``, then *t1* is known to be zero-extended from bit 15.
* - bswap32_i64 *t0*, *t1*, *flags*
* - bswap32_i32 *t0*, *t1*, *flags*
    bswap64_i64 *t0*, *t1*, *flags*
* - deposit_i32/i64 *dest*, *t1*, *t2*, *pos*, *len*
- | Deposit *t2* as a bitfield into *t1*, placing the result in *dest*.
| For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field at bit 8, equivalent to
| *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00)
* - extract_i32/i64 *dest*, *t1*, *pos*, *len*
    sextract_i32/i64 *dest*, *t1*, *pos*, *len*
- | Extract a bitfield from *t1*, placing the result in *dest*.
| For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field at bit 8, equivalent to
| *dest* = (*t1* << 20) >> 28
* - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
- | Extract a 32/64-bit quantity from the concatenation
of *t2*:*t1*, beginning at *pos*. The tcg_gen_extract2_{i32,i64} expander
accepts 0 <= *pos* <= 32/64 as inputs.
* - extrl_i64_i32 *t0*, *t1*
- | For 64-bit hosts only, extract the low 32-bits of input *t1* and place it into *t0*.
* - extrh_i64_i32 *t0*, *t1*
- | For 64-bit hosts only, extract the high 32-bits of input *t1* and place it into *t0*.
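The worked deposit/sextract examples above, and the extrl/extrh pair, can be
written out as plain C. This is a sketch of the semantics only, not the TCG
implementation, and it relies on the usual arithmetic right shift of signed
integers, just as the sextract formula does::

    #include <stdint.h>

    /* deposit_i32 dest, t1, t2, 8, 4 */
    static uint32_t ref_deposit_8_4(uint32_t t1, uint32_t t2)
    {
        return (t1 & ~0x0f00u) | ((t2 << 8) & 0x0f00u);
    }

    /* extract_i32 dest, t1, 8, 4: zero-extended 4-bit field at bit 8 */
    static uint32_t ref_extract_8_4(uint32_t t1)
    {
        return (t1 >> 8) & 0xf;
    }

    /* sextract_i32 dest, t1, 8, 4: sign-extended 4-bit field at bit 8 */
    static int32_t ref_sextract_8_4(uint32_t t1)
    {
        return (int32_t)(t1 << 20) >> 28;
    }

    /* extrl_i64_i32 / extrh_i64_i32 */
    static uint32_t ref_extrl(uint64_t t1) { return (uint32_t)t1; }
    static uint32_t ref_extrh(uint64_t t1) { return (uint32_t)(t1 >> 32); }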
* - setcond_i32/i64 *dest*, *t1*, *t2*, *cond*
- | *dest* = (*t1* *cond* *t2*)
| Set *dest* to 1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
* - negsetcond_i32/i64 *dest*, *t1*, *t2*, *cond*
- | *dest* = -(*t1* *cond* *t2*)
| Set *dest* to -1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
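A C sketch of setcond versus negsetcond for one sample condition
(``TCG_COND_LTU``, unsigned less-than); the other conditions follow the same
1/0 versus -1/0 pattern, and the function names are illustrative only::

    #include <stdint.h>

    static uint32_t ref_setcond_ltu_i32(uint32_t t1, uint32_t t2)
    {
        return t1 < t2 ? 1u : 0u;
    }

    static uint32_t ref_negsetcond_ltu_i32(uint32_t t1, uint32_t t2)
    {
        return t1 < t2 ? ~0u : 0u;   /* -1, i.e. an all-ones mask */
    }

The -1 form is convenient when the result is used directly as a bit mask.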
* - ext_i32_i64 *t0*, *t1*
- | Convert *t1* (32 bit) to *t0* (64 bit) with sign extension
* - extu_i32_i64 *t0*, *t1*
- | Convert *t1* (32 bit) to *t0* (64 bit) with zero extension
* - trunc_i64_i32 *t0*, *t1*
- | Truncate *t1* (64 bit) to *t0* (32 bit)
* - concat_i32_i64 *t0*, *t1*, *t2*
- | Construct *t0* (64-bit) taking the low half from *t1* (32 bit) and the high half from *t2* (32 bit)
* - concat32_i64 *t0*, *t1*, *t2*
- | Construct *t0* (64-bit) taking the low half from *t1* (64 bit) and the high half from *t2* (64 bit)
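A plain C sketch of the conversions above (illustrative names only)::

    #include <stdint.h>

    static int64_t  ref_ext_i32_i64 (int32_t t1)   { return t1; }            /* sign extend */
    static uint64_t ref_extu_i32_i64(uint32_t t1)  { return t1; }            /* zero extend */
    static uint32_t ref_trunc_i64_i32(uint64_t t1) { return (uint32_t)t1; }  /* low 32 bits */

    /* concat_i32_i64: t1 becomes the low half, t2 the high half. */
    static uint64_t ref_concat_i32_i64(uint32_t t1, uint32_t t2)
    {
        return (uint64_t)t1 | ((uint64_t)t2 << 32);
    }

    /* concat32_i64: the low 32 bits of each 64-bit input are combined. */
    static uint64_t ref_concat32_i64(uint64_t t1, uint64_t t2)
    {
        return (t1 & 0xffffffffu) | (t2 << 32);
    }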
* - ld_i32/i64 *t0*, *t1*, *offset*
    ld8s_i32/i64 *t0*, *t1*, *offset*
    ld8u_i32/i64 *t0*, *t1*, *offset*
    ld16s_i32/i64 *t0*, *t1*, *offset*
    ld16u_i32/i64 *t0*, *t1*, *offset*
    ld32s_i64 *t0*, *t1*, *offset*
    ld32u_i64 *t0*, *t1*, *offset*
- | *t0* = read(*t1* + *offset*)
* - st_i32/i64 *t0*, *t1*, *offset*
    st8_i32/i64 *t0*, *t1*, *offset*
    st16_i32/i64 *t0*, *t1*, *offset*
    st32_i64 *t0*, *t1*, *offset*
- | write(*t0*, *t1* + *offset*)
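These are ordinary host-memory accesses at address *t1* + *offset*, with the
narrower loads sign- or zero-extending into the destination as their names
indicate. A plain C sketch of a few of them (not the TCG implementation)::

    #include <stdint.h>
    #include <string.h>

    static int32_t ref_ld8s_i32(const void *t1, intptr_t offset)
    {
        int8_t v;
        memcpy(&v, (const char *)t1 + offset, sizeof(v));
        return v;                          /* sign-extended to 32 bits */
    }

    static uint32_t ref_ld16u_i32(const void *t1, intptr_t offset)
    {
        uint16_t v;
        memcpy(&v, (const char *)t1 + offset, sizeof(v));
        return v;                          /* zero-extended to 32 bits */
    }

    static void ref_st32_i64(uint64_t t0, void *t1, intptr_t offset)
    {
        uint32_t v = (uint32_t)t0;         /* low 32 bits of t0 */
        memcpy((char *)t1 + offset, &v, sizeof(v));
    }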
- | Similar to add/sub, except that the double-word inputs *t1* and *t2* are
formed from two single-word arguments, and the double-word output *t0* is
returned in two single-word outputs.
* - mulu2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
- | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full
double-word product *t0*, which is returned in two single-word outputs.
* - muls2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
- | Similar to mulu2, except the two inputs *t1* and *t2* are signed.
* - mulsh_i32/i64 *t0*, *t1*, *t2*
    muluh_i32/i64 *t0*, *t1*, *t2*
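The double-word multiplies can be modelled in C with a wider intermediate
type; mulsh/muluh keep only the high half of the signed/unsigned product. A
sketch of the 32-bit forms (illustrative names only)::

    #include <stdint.h>

    /* mulu2_i32: full 64-bit unsigned product, split into two words. */
    static void ref_mulu2_i32(uint32_t *t0_low, uint32_t *t0_high,
                              uint32_t t1, uint32_t t2)
    {
        uint64_t p = (uint64_t)t1 * t2;
        *t0_low  = (uint32_t)p;
        *t0_high = (uint32_t)(p >> 32);
    }

    /* muluh_i32 / mulsh_i32: high half of the product only.  The signed
       form relies on an arithmetic right shift of the 64-bit product. */
    static uint32_t ref_muluh_i32(uint32_t t1, uint32_t t2)
    {
        return (uint32_t)(((uint64_t)t1 * t2) >> 32);
    }

    static int32_t ref_mulsh_i32(int32_t t1, int32_t t2)
    {
        return (int32_t)(((int64_t)t1 * (int64_t)t2) >> 32);
    }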
- | Similar to brcond, except that the 64-bit values *t0* and *t1* are formed
from two 32-bit arguments.
- | Similar to setcond, except that the 64-bit values *t1* and *t2* are formed
from two 32-bit arguments.
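On a 32-bit host the 64-bit comparison operands arrive as low/high word
pairs. The following C sketch shows setcond2 for one sample condition
(``TCG_COND_LT``, signed); the parameter names are illustrative only::

    #include <stdint.h>

    static uint32_t ref_setcond2_lt(uint32_t t1_low, uint32_t t1_high,
                                    uint32_t t2_low, uint32_t t2_high)
    {
        int64_t a = (int64_t)(((uint64_t)t1_high << 32) | t1_low);
        int64_t b = (int64_t)(((uint64_t)t2_high << 32) | t2_low);
        return a < b ? 1u : 0u;
    }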
* - qemu_ld_i32/i64/i128 *t0*, *t1*, *flags*, *memidx*
    qemu_st_i32/i64/i128 *t0*, *t1*, *flags*, *memidx*
    qemu_st8_i32 *t0*, *t1*, *flags*, *memidx*
- | Load data at the guest address *t1* into *t0*, or store data in *t0* at guest
address *t1*. The _i32/_i64/_i128 size applies to the size of the input/output
register *t0* only. The address *t1* is always sized according to the guest,
and the width of the memory operation is controlled by *flags*.
| Both *t0* and *t1* may be split into little-endian ordered pairs of registers
if dealing with 64-bit quantities on a 32-bit host.
    ld_vec *v0*, *t1*
    st_vec *v0*, *t1*