/*
 * Helpers for HPPA instructions.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "qemu/timer.h"
#include "trace.h"

G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit_restore(cs, ra);
}

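/*
 * Store a partial word atomically: read the naturally aligned word
 * containing the bytes to be stored, merge in the new bytes under a
 * big-endian byte mask, and retry with cmpxchg until no other thread
 * has modified the word in the meantime.  This makes the 3-byte (and,
 * below, the 5/6/7-byte) stores appear atomic to other vCPUs.
 */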
static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
                                uint32_t val, uint32_t mask, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uint32_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint32_t *)((uintptr_t)vaddr & -4);

    old = *haddr;
    while (1) {
        new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
}

static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
                                uint64_t val, uint64_t mask,
                                int size, uintptr_t ra)
{
#ifdef CONFIG_ATOMIC64
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uint64_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint64_t *)((uintptr_t)vaddr & -8);

    old = *haddr;
    while (1) {
        new = be64_to_cpu((cpu_to_be64(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
#else
    cpu_loop_exit_atomic(env_cpu(env), ra);
#endif
}

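/*
 * stby,b ("begin" case): store the rightmost 4 - (addr & 3) bytes of
 * the register, from addr up to the next word boundary; register byte
 * i goes to byte i of the aligned word.  E.g. with addr & 3 == 1,
 * bits [23:0] of val land at addr, addr + 1 and addr + 2.
 */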
static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 1:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    default:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    }
}

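/*
 * stdby,b: the doubleword analogue of stby,b, storing the rightmost
 * 8 - (addr & 7) bytes of the register from addr up to the next
 * doubleword boundary.  The 3, 5, 6 and 7 byte cases cannot be done
 * with a single store, so in a parallel context they go through the
 * atomic read-modify-write helpers above.
 */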
static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 6:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 5:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    case 3:
        /* The 5 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 2:
        /* The 6 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
        } else {
            cpu_stw_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 2, val, ra);
        }
        break;
    case 1:
        /* The 7 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 48, ra);
            cpu_stw_data_ra(env, addr + 1, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 3, val, ra);
        }
        break;
    default:
        cpu_stq_data_ra(env, addr, val, ra);
        break;
    }
}

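/*
 * The _parallel variants are selected at translation time when the TB
 * may run concurrently with other vCPUs; only then must the multi-byte
 * cases above take the atomic path.  The plain variants are free to
 * use a sequence of ordinary stores.
 */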
void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_b(env, addr, val, false, GETPC());
}

void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_b(env, addr, val, true, GETPC());
}

void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_b(env, addr, val, false, GETPC());
}

void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_b(env, addr, val, true, GETPC());
}

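/*
 * stby,e ("end" case): store the leftmost addr & 3 bytes of the
 * register so that they end at addr - 1.  When addr is word aligned
 * nothing is stored, but write permission is still checked.
 */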
static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty. */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}

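/*
 * stdby,e: the doubleword analogue of stby,e, storing the leftmost
 * addr & 7 bytes of the register so that they end at addr - 1.
 */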
static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        /* The 7 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 7, val,
                                0xffffffffffffff00ull, 7, ra);
        } else {
            cpu_stl_data_ra(env, addr - 7, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 6:
        /* The 6 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 6, val,
                                0xffffffffffff0000ull, 6, ra);
        } else {
            cpu_stl_data_ra(env, addr - 6, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        }
        break;
    case 5:
        /* The 5 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 5, val,
                                0xffffffffff000000ull, 5, ra);
        } else {
            cpu_stl_data_ra(env, addr - 5, val >> 32, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr - 4, val >> 32, ra);
        break;
    case 3:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val >> 32, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 48, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 40, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 48, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 56, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty. */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}

void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_e(env, addr, val, false, GETPC());
}

void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_e(env, addr, val, true, GETPC());
}

void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_e(env, addr, val, false, GETPC());
}

void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_e(env, addr, val, true, GETPC());
}

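/*
 * ldcw/ldcd (load and clear) require a 16-byte aligned address; the
 * architecture leaves unaligned use undefined, so just log it.
 */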
void HELPER(ldc_check)(target_ulong addr)
{
    if (unlikely(addr & 0xf)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Undefined ldc to unaligned address mod 16: "
                      TARGET_FMT_lx "\n", addr);
    }
}

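/*
 * Implement the PROBE instruction: test whether the given privilege
 * level has the access rights in 'want' (PAGE_READ or PAGE_WRITE) for
 * the address, without performing the access itself.
 */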
target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
                           uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
    return page_check_range(addr, 1, want);
#else
    int prot, excp, mmu_idx;
    hwaddr phys;

    trace_hppa_tlb_probe(addr, level, want);
    /* Fail if the requested privilege level is more privileged than
       the current level (numerically lower levels are more privileged). */
    if (level < (env->iaoq_f & 3)) {
        return 0;
    }

    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot);
    if (excp >= 0) {
        cpu_restore_state(env_cpu(env), GETPC());
        hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        helper_excp(env, excp);
    }
    return (want & prot) != 0;
#endif
}

target_ulong HELPER(read_interval_timer)(void)
{
#ifdef CONFIG_USER_ONLY
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks. */
    return cpu_get_host_ticks();
#else
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the cr16,
       present it with a well-timed clock fixed at 250MHz. */
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
#endif
}

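/*
 * The halfword (MAX-2 multimedia) helpers below operate on four
 * parallel 16-bit lanes of a 64-bit register, applying the same
 * operation to every lane.  hadd,ss adds with signed saturation:
 * each lane result is clamped to [INT16_MIN, INT16_MAX].  hadd,us
 * treats the r1 lanes as unsigned and the r2 lanes as signed, and
 * clamps each result to [0, UINT16_MAX].
 */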
uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

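/*
 * havg: per-lane arithmetic mean of unsigned halfwords.  Halves are
 * rounded to the nearest odd value (e.g. 2.5 and 3.5 both become 3),
 * which avoids the systematic upward bias of round-half-up when
 * averages are chained.
 */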
uint64_t HELPER(havg)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = extract64(r2, i, 16);
        int fr = f1 + f2;

        ret = deposit64(ret, i, 16, (fr >> 1) | (fr & 1));
    }
    return ret;
}

uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

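/*
 * hshladd/hshradd: shift each lane of r1 left or right by sh before
 * adding the corresponding lane of r2, with signed saturation as for
 * hadd,ss.
 */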
uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 << sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 >> sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}