/*
 * x86 memory access helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg/tcg.h"
#include "helper-tcg.h"

void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

    oldv = cpu_ldq_data_ra(env, a0, ra);
    newv = (cmpv == oldv ? newv : oldv);
    /* always do the store */
    cpu_stq_data_ra(env, a0, newv, ra);

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
#ifdef CONFIG_ATOMIC64
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

#ifdef CONFIG_USER_ONLY
    {
        uint64_t *haddr = g2h(a0);
        cmpv = cpu_to_le64(cmpv);
        newv = cpu_to_le64(newv);
        oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
        oldv = le64_to_cpu(oldv);
    }
#else
    {
        uintptr_t ra = GETPC();
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
        oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
    }
#endif

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
#else
    cpu_loop_exit_atomic(env_cpu(env), GETPC());
#endif /* CONFIG_ATOMIC64 */
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();
    Int128 oldv, cmpv, newv;
    uint64_t o0, o1;
    int eflags;
    bool success;

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
    newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

    o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
    o1 = cpu_ldq_data_ra(env, a0 + 8, ra);

    oldv = int128_make128(o0, o1);
    success = int128_eq(oldv, cmpv);
    if (!success) {
        newv = oldv;
    }

    cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
    cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);

    if (success) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = int128_getlo(oldv);
        env->regs[R_EDX] = int128_gethi(oldv);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, ra);
    } else if (HAVE_CMPXCHG128) {
        int eflags = cpu_cc_compute_all(env, CC_OP);

        Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
        Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
                                                    newv, oi, ra);

        if (int128_eq(oldv, cmpv)) {
            eflags |= CC_Z;
        } else {
            env->regs[R_EAX] = int128_getlo(oldv);
            env->regs[R_EDX] = int128_gethi(oldv);
            eflags &= ~CC_Z;
        }
        CC_SRC = eflags;
    } else {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
}
#endif

void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldsw_data_ra(env, a0, GETPC());
    high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
    v = (int16_t)v;
    if (v < low || v > high) {
        if (env->hflags & HF_MPX_EN_MASK) {
            env->bndcs_regs.sts = 0;
        }
        raise_exception_ra(env, EXCP05_BOUND, GETPC());
    }
}

void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldl_data_ra(env, a0, GETPC());
    high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
    if (v < low || v > high) {
        if (env->hflags & HF_MPX_EN_MASK) {
            env->bndcs_regs.sts = 0;
        }
        raise_exception_ra(env, EXCP05_BOUND, GETPC());
    }
}