Lines Matching +full:nand +full:- +full:rb
30 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV or _CALL_AIX.
70 calling convention, we can re-use the TOC register since we'll be reloading
71 it at every call. Otherwise R12 will do nicely as neither a call-saved
145 TCG_REG_R12, /* call clobbered, non-arguments */
158 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
224 return tcg_tbrel_diff(s, target) - 4;
346 if ((ct & TCG_CT_CONST_N16) && -sval == (int16_t)-sval) {
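A worked check of the negation/cast idiom on the line above (sval being the constant under constraint): for sval = 32768 the test passes, since -sval = -32768 survives the round trip through int16_t and the constant can be used as a negated 16-bit immediate; for sval = -32768 it fails, because -sval = 32768 does not fit in int16_t.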
358 if ((ct & TCG_CT_CONST_MONE) && sval == -1) {
491 #define NAND XO31(476)
686 #define RB(r) ((r)<<11)
693 #define FXM(b) (1 << (19 - (b)))
702 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
703 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
740 /* The low bit here is set if the RA and RB fields must be inverted. */
792 hi = value - lo;
805 /* Ensure that the prefixed instruction does not cross a 64-byte boundary. */
808 return ((uintptr_t)s->code_ptr & 0x3f) == 0x3c;
820 return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0);
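As a standalone illustration (not backend code) of why 0x3c is the magic offset in the test above: a prefixed instruction is two 4-byte words, so only start offsets up to 0x38 within a 64-byte block keep both words in the same block.

    #include <assert.h>
    #include <stdint.h>

    /* Re-statement of the quoted check: a 4-byte prefix emitted at offset
       0x3c would leave its 4-byte suffix at offset 0x40, i.e. in the next
       64-byte block, so a padding nop must be emitted first. */
    static int prefix_needs_align(uintptr_t code_ptr)
    {
        return (code_ptr & 0x3f) == 0x3c;
    }

    int main(void)
    {
        assert(prefix_needs_align(0x1000 + 0x3c));   /* would cross: pad */
        assert(!prefix_needs_align(0x1000 + 0x38));  /* 8 bytes still fit */
        return 0;
    }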
823 /* Output Type 00 Prefix - 8-Byte Load/Store Form (8LS:D) */
837 /* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */
873 /* Altivec does not support vector->integer moves. */
882 /* Altivec does not support integer->vector moves. */
977 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
982 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
993 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
998 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
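The four lines above implement constant shifts as rotate-and-mask; a plain C sketch of the two identities behind the RLWINM encodings (illustration only, not backend code):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned c)
    {
        return c ? (x << c) | (x >> (32 - c)) : x;
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        for (unsigned c = 1; c < 32; c++) {
            /* shl: rotate left by c, keep IBM bits 0..31-c (clear low c bits) */
            assert((x << c) == (rotl32(x, c) & (0xffffffffu << c)));
            /* shr: rotate left by 32-c, keep IBM bits c..31 (clear high c bits) */
            assert((x >> c) == (rotl32(x, 32 - c) & (0xffffffffu >> c)));
        }
        return 0;
    }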
1046 /* Load 16-bit immediates with one insn. */
1059 * Load values up to 34 bits, and pc-relative addresses,
1077 /* Load 32-bit immediates with two insns. Note that we've already
1090 /* Load masked 16-bit value. */
1117 intptr_t hi = tcg_pcrel_diff(s, (void *)arg) - 4;
1120 hi -= lo;
1136 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
1143 new_pool_label(s, arg, R_PPC64_PCREL34, s->code_ptr - 2, 0);
1148 new_pool_label(s, arg, R_PPC_REL14, s->code_ptr, 0);
1176 if (low >= -16 && low < 16) {
1188 if (low >= -16 && low < 16) {
1196 if (low >= -16 && low < 16) {
1212 new_pool_label(s, val, R_PPC64_PCREL34, s->code_ptr - 2, 0);
1215 new_pool_l2(s, R_PPC64_PCREL34, s->code_ptr - 2, 0, val, val);
1229 load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
1231 new_pool_label(s, val, rel, s->code_ptr, add);
1233 new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
1236 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
1238 new_pool_l2(s, rel, s->code_ptr, add, val, val);
1240 new_pool_l4(s, rel, s->code_ptr, add,
1293 if (c == 0 || c == -1) {
1297 lsb = test & -test;
1299 if (test & (test - 1)) {
1304 *mb = test ? clz32(test & -test) + 1 : 0;
1316 lsb = c & -c;
1318 if (c == -lsb) {
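A minimal sketch of the bit tricks quoted above, for the simple non-wrapping case: c & -c isolates the lowest set bit, and adding it to a contiguous run of ones leaves at most one bit set. (The real mask_operand/mask64_operand code also accepts runs that wrap around the top bit; this illustration does not.)

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool is_contiguous_mask(uint32_t c)
    {
        uint32_t lsb;

        if (c == 0) {
            return false;
        }
        lsb = c & -c;               /* isolate the lowest set bit */
        c += lsb;                   /* a contiguous run plus its lsb ... */
        return (c & (c - 1)) == 0;  /* ... leaves at most one bit set */
    }

    int main(void)
    {
        assert(is_contiguous_mask(0x00ffff00));
        assert(is_contiguous_mask(0x80000000));
        assert(!is_contiguous_mask(0x00ff00ff));
        return 0;
    }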
1446 * Note that the MLS:D insns retain their un-prefixed opcode,
1496 offset = (offset - l0) >> 16;
1501 l1 = (int16_t)(offset - 0x4000);
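A worked example of the split above, assuming the usual ADDIS-based sequence around it: for offset 0x7fffff00, l0 = (int16_t)0xff00 = -256 and the remaining high part is 0x8000, which is negative as a signed 16-bit ADDIS immediate even though the original offset was not; folding in the extra 0x4000 splits it into two in-range high parts, 0x4000 + 0x4000, after which the low displacement -256 rebuilds the full offset (0x40000000 + 0x40000000 - 0x100 = 0x7fffff00).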
1539 shift = (offset - 4) & 0xc;
1559 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1592 shift = (offset - 4) & 0xc;
1638 * Set dest non-zero if and only if (arg1 & arg2) is non-zero.
1687 * All of the tests are 16-bit, so a 32-bit sign extend always works.
1743 op |= const_arg2 ? arg2 & 0xffff : RB(arg2);
1752 * X != 0 implies X + -1 generates a carry.
1754 * = -1 + CA
1755 * = CA ? 0 : -1
1757 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1779 * X != 0 implies X + -1 generates a carry. Extra addition
1780  * trickery means: R = ~(X-1) + X + C = -X + X + C = C.
1782 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1788 tcg_out32(s, ADDI | TAI(dst, dst, -1));
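A scalar model (illustration only, 32-bit for brevity) of the addic/subfe idiom in the quoted lines: the carry out of X + 0xffffffff is set exactly when X != 0, and the reverse-subtract-extended collapses the result to that carry bit.

    #include <assert.h>
    #include <stdint.h>

    static uint32_t setcond_ne_0(uint32_t x)
    {
        uint64_t sum = (uint64_t)x + 0xffffffffu;   /* addic rt, x, -1 */
        uint32_t ca = (uint32_t)(sum >> 32);        /* carry out */
        uint32_t rt = (uint32_t)sum;
        /* subfe rd, rt, x: rd = ~rt + x + ca = ~(x-1) + x + ca = ca */
        return ~rt + x + ca;
    }

    int main(void)
    {
        assert(setcond_ne_0(0) == 0);
        assert(setcond_ne_0(5) == 1);
        assert(setcond_ne_0(0xffffffffu) == 1);
        return 0;
    }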
1830 /* Re-use tcg_to_bc for BI and BO_COND_{TRUE,FALSE}. */
1885 tcg_out_movi(s, type, arg0, neg ? -1 : 1);
1944 tcg_out32(s, ADDI | TAI(arg0, arg0, -1));
1997 if (l->has_value) {
1998 insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
2000 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2013 if (l->has_value) {
2014 bd = reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
2016 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
2255 arg -= ofs;
2338 MemOp opc = get_memop(lb->oi);
2340 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2348 tcg_out_b(s, 0, lb->raddr);
2354 MemOp opc = get_memop(lb->oi);
2356 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2363 tcg_out_b(s, 0, lb->raddr);
2382 * Reject 16-byte memop with 16-byte atomicity,
2383 * but do allow a pair of 64-bit operations.
2389 /* We expect to use a 16-bit negative offset from ENV. */
2390 #define MIN_TLB_MASK_TABLE_OFS -32768
2393 * For system-mode, perform the TLB load and compare.
2394 * For user-mode, perform any required alignment tests.
2401 TCGType addr_type = s->addr_type;
2407 * Book II, Section 1.4, Single-Copy Atomicity, specifies:
2411 * of these accesses are implementation-dependent." Thus MO_ATOM_IFALIGN.
2413 * As of 3.0, "the non-atomic access is performed as described in
2417 h->aa = atom_and_align_for_opc(s, opc,
2421 a_bits = h->aa.align;
2432 ldst->is_ld = is_ld;
2433 ldst->oi = oi;
2434 ldst->addr_reg = addr;
2443 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2446 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2452 * For 64-bit host, always load the entire 64-bit slot for simplicity.
2472 /* Clear the non-page, non-alignment bits from the address in R0. */
2475 * We don't support unaligned accesses on 32-bits.
2483 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2489 * cross a page boundary. The trick is to add the access size-1
2495 unsigned a_mask = (1 << a_bits) - 1;
2496 unsigned s_mask = (1 << s_bits) - 1;
2497 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2504 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2506 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
2509 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
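A toy model (not the backend code; a 4 KiB page size is assumed for the example) of the comparison value those lines construct: add s_mask - a_mask so that a page-crossing access carries into the page number, then keep only the page number plus the low a_bits. The TLB comparator holds the page number alone, so it matches only for an a_bits-aligned access that stays within one page.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum { PAGE_BITS = 12 };

    static bool fast_path_hits(uint64_t addr, unsigned a_bits, unsigned s_bits)
    {
        uint64_t page_mask = ~(((uint64_t)1 << PAGE_BITS) - 1);
        uint64_t a_mask = ((uint64_t)1 << a_bits) - 1;
        uint64_t s_mask = ((uint64_t)1 << s_bits) - 1;

        /* For an a_bits-aligned address, the carry from this add reaches
           the page number iff the last byte of the access is on the next
           page. */
        uint64_t t = addr + (s_mask - a_mask);

        /* Keep the page number and the low a_bits alignment bits. */
        return (t & (page_mask | a_mask)) == (addr & page_mask);
    }

    int main(void)
    {
        assert(fast_path_hits(0x1234, 0, 3));   /* 8-byte load, inside page */
        assert(!fast_path_hits(0x1ffd, 0, 3));  /* crosses into next page   */
        assert(!fast_path_hits(0x1002, 2, 2));  /* violates 4-byte alignment */
        return 0;
    }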
2517 /* Load a pointer into the current opcode w/conditional branch-link. */
2518 ldst->label_ptr[0] = s->code_ptr;
2521 h->base = TCG_REG_TMP1;
2525 ldst->is_ld = is_ld;
2526 ldst->oi = oi;
2527 ldst->addr_reg = addr;
2531 tcg_out32(s, ANDI | SAI(addr, TCG_REG_R0, (1 << a_bits) - 1));
2533 ldst->label_ptr[0] = s->code_ptr;
2537 h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
2541 /* Zero-extend the guest address for use in the host address. */
2543 h->index = TCG_REG_TMP2;
2545 h->index = addr;
2594 ldst->type = data_type;
2595 ldst->datalo_reg = datalo;
2596 ldst->datahi_reg = datahi;
2597 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2636 ldst->type = data_type;
2637 ldst->datalo_reg = datalo;
2638 ldst->datahi_reg = datahi;
2639 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2665 tcg_debug_assert(datahi == datalo - 1);
2691 ldst->type = TCG_TYPE_I128;
2692 ldst->datalo_reg = datalo;
2693 ldst->datahi_reg = datahi;
2694 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2701 tcg_out_qemu_ld(s, data, -1, addr, oi, type);
2728 tcg_out_qemu_st(s, data, -1, addr, oi, type);
2795 + TCG_TARGET_STACK_ALIGN - 1) \
2796 & -TCG_TARGET_STACK_ALIGN)
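The two lines above use the standard power-of-two round-up; a tiny standalone check of the idiom:

    #include <assert.h>

    /* (x + align - 1) & -align rounds x up to a power-of-two alignment. */
    #define ROUND_UP(x, align)  (((x) + (align) - 1) & -(align))

    int main(void)
    {
        assert(ROUND_UP(0, 16) == 0);
        assert(ROUND_UP(1, 16) == 16);
        assert(ROUND_UP(16, 16) == 16);
        assert(ROUND_UP(17, 16) == 32);
        return 0;
    }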
2798 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
2805 const void **desc = (const void **)s->code_ptr;
2808 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2811 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2817 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2827 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2835 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2885 ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4;
2887 tcg_out_addpcis(s, TCG_REG_TMP1, offset - lo);
2891 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - lo);
2910 uintptr_t addr = tb->jmp_target_addr[n];
2911 intptr_t diff = addr - jmp_rx;
2959 * environment. So in 64-bit mode it's always carry-out of bit 63.
3213 tcg_out32(s, NAND | SAB(a1, a0, a2));
3425 tgen_addco_rri(s, type, a0, a1, -a2);
3469 tcg_debug_assert(a1 == 0 || a1 == -1);
3537 * dep(a, b, m) -> (a & ~m) | (b & m)
3576 * dep(a, b, m) -> (a & ~m) | (b & m)
3612 * dep(a, b, m) -> (a & ~m) | (b & m)
3668 tcg_out_rlw(s, RLWIMI, a0, a2, ofs, 32 - ofs - len, 31 - ofs);
3670 tcg_out_rld(s, RLDIMI, a0, a2, ofs, 64 - ofs - len);
3690 tgen_andi(s, TCG_TYPE_I32, a0, a1, (1 << len) - 1);
3692 tcg_out_rlw(s, RLWINM, a0, a1, 32 - ofs, 32 - len, 31);
3694 tcg_out_rld(s, RLDICL, a0, a1, 64 - ofs, 64 - len);
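A plain C restatement (illustration only) of the deposit identity repeated above and of the rotate-then-mask extract that the RLWINM line encodes:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t dep32(uint32_t a, uint32_t b, unsigned ofs, unsigned len)
    {
        uint32_t m = ((len < 32 ? (1u << len) : 0u) - 1) << ofs;
        /* dep(a, b, m) -> (a & ~m) | (b & m), with b pre-shifted into place */
        return (a & ~m) | ((b << ofs) & m);
    }

    static uint32_t ext32(uint32_t a, unsigned ofs, unsigned len)
    {
        /* Rotate the field down to bit 0, then mask to len bits,
           mirroring RLWINM a0, a1, 32 - ofs, 32 - len, 31. */
        uint32_t rot = ofs ? (a >> ofs) | (a << (32 - ofs)) : a;
        return rot & ((len < 32 ? (1u << len) : 0u) - 1);
    }

    int main(void)
    {
        assert(dep32(0xaaaaaaaa, 0x5, 8, 4) == 0xaaaaa5aa);
        assert(ext32(0xaaaaa5aa, 8, 4) == 0x5);
        return 0;
    }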
3865 return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3875 return -1;
3877 return have_isa_2_07 ? 1 : -1;
3885 return -1;
3901 tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
3914 * right justified within the left (zero-index) double-word.
3949 tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3962 tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3991 tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
4329 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
4334 imm &= (8 << vece) - 1;
4368 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
4369 * So using -16 is a quick way to represent 16.
4371 c16 = tcg_constant_vec(type, MO_8, -16);
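Worked out: VSPLTISB's immediate is a signed 5-bit field, so splatting -16 writes 0xF0 into every byte; because only the low 5 bits of each element are significant as a shift/rotate count, that value behaves exactly like 16 (0xF0 & 0x1f = 0x10).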
4546 s->reserved_regs = 0;
4547 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
4548 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
4550 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
4553 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
4555 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
4556 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
4557 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
4558 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
4560 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
4582 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
4583 .cie.id = -1,
4586 .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */
4590 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
4599 0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
4610 p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
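For instance, with SZR = 8 (the 64-bit register size, an assumption for this example), the data_align expression (-SZR & 0x7f) yields 0x78, which is precisely the one-byte sleb128 encoding of -8; the factored register offsets computed on the last line are likewise small enough to presumably fit in a single uleb128 byte each.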