/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

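/*
 * Data accesses need byteswapping when the endianness selected by MSR[LE]
 * differs from the endianness this target binary was built for.
 */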
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if TARGET_BIG_ENDIAN
    return FIELD_EX64(env->msr, MSR, LE);
#else
    return !FIELD_EX64(env->msr, MSR, LE);
#endif
}

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

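/*
 * Probe an access of nb bytes that may span two pages.  Returns a host
 * pointer only when the whole range is directly addressable RAM and the
 * two host pages are contiguous; otherwise returns NULL and the caller
 * must fall back to per-word accesses.
 */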
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

    /* Number of bytes from addr to the end of its page. */
    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page. */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages. */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize. */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}

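/*
 * lmw: load GPRs reg through 31 from 4 * (32 - reg) consecutive bytes
 * of memory, one big-endian word per register.
 */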
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = ppc_env_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; reg < 32; reg++) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; reg < 32; reg++) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = ppc_env_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; reg < 32; reg++) {
            stl_be_p(host, env->gpr[reg]);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; reg < 32; reg++) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

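/*
 * Common code for lswi/lswx: load nb bytes into successive registers,
 * four bytes per register, wrapping from r31 back to r0.  A final
 * partial word is placed left-justified in the register, with the
 * unused low-order bytes cleared.
 */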
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = ppc_env_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}

void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded.  On the other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
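/*
 * The byte count for lswx comes from the XER byte-count field (the
 * xer_bc macro); a count of zero means no bytes are transferred.
 */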
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

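/*
 * stswi/stswx: store nb bytes taken from successive registers, four
 * bytes per register, wrapping from r31 back to r0; a final partial
 * word stores only the high-order bytes of the last register.
 */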
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = ppc_env_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}

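/*
 * dcbz: zero one data cache block.  Zeroing the block that contains the
 * current reservation granule also clears the reservation.
 */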
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        int mmu_idx, int dcbz_size, uintptr_t retaddr)
{
    target_ulong mask = ~(target_ulong)(dcbz_size - 1);
    void *haddr;

    /* Align address */
    addr &= mask;

    /* Check reservation */
    if (unlikely((env->reserve_addr & mask) == addr)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
#ifdef CONFIG_USER_ONLY
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
#else
    haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
    if (unlikely(!haddr)) {
        /* Slow path */
        for (int i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
        return;
    }
#endif

    set_helper_retaddr(retaddr);
    memset(haddr, 0, dcbz_size);
    clear_helper_retaddr();
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, int mmu_idx)
{
    dcbz_common(env, addr, mmu_idx, env->dcache_line_size, GETPC());
}

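/*
 * dcbzl is specific to the 970: the block size cleared depends on the
 * HID5 configuration checked below, falling back to the normal data
 * cache line size otherwise.
 */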
#ifdef TARGET_PPC64
void helper_dcbzl(CPUPPCState *env, target_ulong addr)
{
    int dcbz_size = env->dcache_line_size;

    /*
     * The translator checked for POWERPC_EXCP_970.
     * All that's left is to check HID5.
     */
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }

    dcbz_common(env, addr, ppc_env_mmu_index(env, false), dcbz_size, GETPC());
}
#endif

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * the PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU.  To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}

/* XXX: to be tested */
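/*
 * lscbx: load string and compare byte indexed.  Loads up to XER byte-count
 * bytes into successive registers, stopping when a byte equal to the XER
 * compare byte is encountered; the return value is used to update the
 * byte count in XER.
 */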
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Altivec extension helpers */
#if HOST_BIG_ENDIAN
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use MSR_LE to determine index ordering in a vector.  However,
 * byteswapping is not simply controlled by MSR_LE.  We also need to
 * take into account the endianness of the target.  This is done for
 * the little-endian PPC64 user-mode target.
 */

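/*
 * LVE/STVE access a single vector element selected by the low four bits
 * of the effective address; the index is reversed when MSR[LE] is set so
 * that the element lands in the architecturally correct lane.
 */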
#define LVE(name, access, swap, element)                            \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,              \
                       target_ulong addr)                           \
    {                                                               \
        size_t n_elems = ARRAY_SIZE(r->element);                    \
        int adjust = HI_IDX * (n_elems - 1);                        \
        int sh = sizeof(r->element[0]) >> 1;                        \
        int index = (addr & 0xf) >> sh;                             \
        if (FIELD_EX64(env->msr, MSR, LE)) {                        \
            index = n_elems - index - 1;                            \
        }                                                           \
                                                                    \
        if (needs_byteswap(env)) {                                  \
            r->element[LO_IDX ? index : (adjust - index)] =         \
                swap(access(env, addr, GETPC()));                   \
        } else {                                                    \
            r->element[LO_IDX ? index : (adjust - index)] =         \
                access(env, addr, GETPC());                         \
        }                                                           \
    }
#define I(x) (x)
LVE(LVEBX, cpu_ldub_data_ra, I, u8)
LVE(LVEHX, cpu_lduw_data_ra, bswap16, u16)
LVE(LVEWX, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (FIELD_EX64(env->msr, MSR, LE)) {                            \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(STVEBX, cpu_stb_data_ra, I, u8)
STVE(STVEHX, cpu_stw_data_ra, bswap16, u16)
STVE(STVEWX, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

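/*
 * lxvl/stxvl and their left-justified variants transfer a variable
 * number of bytes: the length is taken from the top byte of rb and is
 * capped at 16 (one full vector register).
 */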
#define VSX_LXVL(name, lj)                                              \
void helper_##name(CPUPPCState *env, target_ulong addr,                \
                   ppc_vsr_t *xt, target_ulong rb)                     \
{                                                                       \
    ppc_vsr_t t;                                                        \
    uint64_t nb = GET_NB(rb);                                           \
    int i;                                                              \
                                                                        \
    t.s128 = int128_zero();                                             \
    if (nb) {                                                           \
        nb = (nb >= 16) ? 16 : nb;                                      \
        if (FIELD_EX64(env->msr, MSR, LE) && !lj) {                     \
            for (i = 16; i > 16 - nb; i--) {                            \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC());   \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        } else {                                                        \
            for (i = 0; i < nb; i++) {                                  \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());       \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        }                                                               \
    }                                                                   \
    *xt = t;                                                            \
}

VSX_LXVL(LXVL, 0)
VSX_LXVL(LXVLL, 1)
#undef VSX_LXVL

#define VSX_STXVL(name, lj)                                        \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                  \
    target_ulong nb = GET_NB(rb);                                  \
    int i;                                                         \
                                                                   \
    if (!nb) {                                                     \
        return;                                                    \
    }                                                              \
                                                                   \
    nb = (nb >= 16) ? 16 : nb;                                     \
    if (FIELD_EX64(env->msr, MSR, LE) && !lj) {                    \
        for (i = 16; i > 16 - nb; i--) {                           \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC());  \
            addr = addr_add(env, addr, 1);                         \
        }                                                          \
    } else {                                                       \
        for (i = 0; i < nb; i++) {                                 \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());      \
            addr = addr_add(env, addr, 1);                         \
        }                                                          \
    }                                                              \
}

VSX_STXVL(STXVL, 0)
VSX_STXVL(STXVLL, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (FIELD_EX64_HV(env->msr) << TEXASR_PRIVILEGE_HV) |
        (FIELD_EX64(env->msr, MSR, PR) << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (FIELD_EX64_HV(env->msr) << 1) |
                          FIELD_EX64(env->msr, MSR, PR);
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xB; /* 0b1011 = transaction failure */
}