xref: /openbmc/qemu/target/hppa/op_helper.c (revision 5e6aceb2)
/*
 * Helpers for HPPA instructions.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "qemu/timer.h"
#include "trace.h"

G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

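/*
 * As above, but raised from within a helper: unwind the TCG state
 * through the host return address RA first, so that the guest PC and
 * CPU state are restored before the exception is delivered.
 */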
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit_restore(cs, ra);
}

void HELPER(tsv)(CPUHPPAState *env, target_ulong cond)
{
    if (unlikely((target_long)cond < 0)) {
        hppa_dynamic_excp(env, EXCP_OVERFLOW, GETPC());
    }
}

void HELPER(tcond)(CPUHPPAState *env, target_ulong cond)
{
    if (unlikely(cond)) {
        hppa_dynamic_excp(env, EXCP_COND, GETPC());
    }
}

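/*
 * Store the bytes of VAL selected by MASK into the aligned word that
 * contains ADDR, using a compare-and-swap loop so that concurrent
 * stores to the other byte lanes are preserved.  The merge is done in
 * guest (big-endian) byte order.
 *
 * Worked trace (illustrative only, not part of the build): a 3-byte
 * store of val = 0x00aabbcc at an address with addr % 4 == 1 uses
 * mask = 0x00ffffff; if the word currently holds 0x11223344 in guest
 * order, the cmpxchg installs 0x11aabbcc, leaving byte 0 untouched.
 */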
static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
                                uint32_t val, uint32_t mask, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, 0);
    uint32_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        /* No host memory on which to cmpxchg (e.g. MMIO): restart the
           insn with all cpus serialized.  */
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint32_t *)((uintptr_t)vaddr & -4);

    old = *haddr;
    while (1) {
        new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
}

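/*
 * 64-bit analogue of atomic_store_mask32.  Hosts without a 64-bit
 * compare-and-swap (!CONFIG_ATOMIC64) cannot do this lock-free, so
 * they bail out to cpu_loop_exit_atomic and re-execute the insn with
 * all cpus stopped.
 */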
static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
                                uint64_t val, uint64_t mask,
                                int size, uintptr_t ra)
{
#ifdef CONFIG_ATOMIC64
    int mmu_idx = cpu_mmu_index(env, 0);
    uint64_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint64_t *)((uintptr_t)vaddr & -8);

    old = *haddr;
    while (1) {
        new = be64_to_cpu((cpu_to_be64(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
#else
    cpu_loop_exit_atomic(env_cpu(env), ra);
#endif
}

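/*
 * Implement the "begin" case of STBY: store the rightmost bytes of
 * VAL, starting at ADDR and stopping at the next word boundary.  The
 * 3-byte case cannot be done with a single host store, so when the
 * guest may run multi-threaded (PARALLEL set) it goes through the
 * cmpxchg helper above to appear atomic.
 *
 * Worked example (illustrative only): with val = 0x11223344 and
 * addr % 4 == 1, bytes 0x22, 0x33, 0x44 land at addr .. addr + 2;
 * with addr % 4 == 0 the full word is stored.
 */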
static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 1:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    default:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    }
}

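/*
 * Doubleword variant of the above for STDBY: store the rightmost
 * bytes of VAL from ADDR up to the next doubleword boundary.  The
 * 3, 5, 6 and 7 byte cases must likewise appear atomic in parallel
 * mode.
 */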
static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 6:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 5:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    case 3:
        /* The 5 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 2:
        /* The 6 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
        } else {
            cpu_stw_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 2, val, ra);
        }
        break;
    case 1:
        /* The 7 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 48, ra);
            cpu_stw_data_ra(env, addr + 1, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 3, val, ra);
        }
        break;
    default:
        cpu_stq_data_ra(env, addr, val, ra);
        break;
    }
}

void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_b(env, addr, val, false, GETPC());
}

void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_b(env, addr, val, true, GETPC());
}

void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_b(env, addr, val, false, GETPC());
}

void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_b(env, addr, val, true, GETPC());
}

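/*
 * Implement the "end" case of STBY: store the leftmost bytes of VAL,
 * from the previous word boundary up to (but not including) ADDR.
 *
 * Worked example (illustrative only): with val = 0x11223344 and
 * addr % 4 == 3, bytes 0x11, 0x22, 0x33 land at addr - 3 .. addr - 1.
 * When addr is word aligned nothing is stored, but the location is
 * still probed for writability.
 */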
static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty.  */
        probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra);
        break;
    }
}

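/*
 * Doubleword "end" case for STDBY: store the leftmost bytes of VAL,
 * from the previous doubleword boundary up to (but not including)
 * ADDR.
 */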
static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        /* The 7 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 7, val,
                                0xffffffffffffff00ull, 7, ra);
        } else {
            cpu_stl_data_ra(env, addr - 7, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 6:
        /* The 6 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 6, val,
                                0xffffffffffff0000ull, 6, ra);
        } else {
            cpu_stl_data_ra(env, addr - 6, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        }
        break;
    case 5:
        /* The 5 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 5, val,
                                0xffffffffff000000ull, 5, ra);
        } else {
            cpu_stl_data_ra(env, addr - 5, val >> 32, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr - 4, val >> 32, ra);
        break;
    case 3:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty.  */
        probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra);
        break;
    }
}

void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_e(env, addr, val, false, GETPC());
}

void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_e(env, addr, val, true, GETPC());
}

void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_e(env, addr, val, false, GETPC());
}

void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_e(env, addr, val, true, GETPC());
}

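/*
 * LDCW requires its address to be 16-byte aligned; behavior on an
 * unaligned address is architecturally undefined, so just warn.
 */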
void HELPER(ldc_check)(target_ulong addr)
{
    if (unlikely(addr & 0xf)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Undefined ldc to unaligned address mod 16: "
                      TARGET_FMT_lx "\n", addr);
    }
}

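/*
 * Implement the PROBE instruction: return nonzero if an access of
 * type WANT (read and/or write) at ADDR would succeed at privilege
 * LEVEL, without actually performing the access.
 */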
target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
                           uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
    return page_check_range(addr, 1, want);
#else
    int prot, excp, mmu_idx;
    hwaddr phys;

    trace_hppa_tlb_probe(addr, level, want);
    /* Fail if the requested privilege level is higher than current.  */
    if (level < (env->iaoq_f & 3)) {
        return 0;
    }

    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys,
                                     &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        hppa_dynamic_excp(env, excp, GETPC());
    }
    return (want & prot) != 0;
#endif
}

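/*
 * In system mode the cr16 interval timer is presented as a fixed
 * 250MHz clock: one tick every 4ns, so the tick count is simply the
 * virtual-clock nanosecond count divided by 4 (ns >> 2).
 */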
target_ulong HELPER(read_interval_timer)(void)
{
#ifdef CONFIG_USER_ONLY
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks.  */
    return cpu_get_host_ticks();
#else
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the cr16,
       present it with a well-timed clock fixed at 250MHz.  */
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
#endif
}

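/*
 * Parallel halfword add with signed saturation, for the MAX HADD,ss
 * instruction: each of the four 16-bit lanes is added independently
 * and clamped to [INT16_MIN, INT16_MAX].
 *
 * Worked example (illustrative only): in any one lane,
 * 0x7fff + 0x0001 would wrap to 0x8000; saturation yields 0x7fff.
 */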
uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

/* As above, but with unsigned saturation: the R1 lanes are unsigned,
   the R2 lanes signed, and the result is clamped to [0, UINT16_MAX].  */
uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

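/*
 * Parallel halfword average.  The sum is shifted right one bit and
 * the bit shifted out is ORed back into the LSB, i.e. a half rounds
 * to odd, which avoids systematic bias when averages are chained.
 *
 * Worked example (illustrative only): (1 + 2) = 3 -> (3 >> 1) | 1 = 1;
 * (1 + 4) = 5 -> (5 >> 1) | 1 = 3, rounding 2.5 to the odd value 3;
 * (2 + 4) = 6 -> (6 >> 1) | 0 = 3 stays exact.
 */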
uint64_t HELPER(havg)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = extract64(r2, i, 16);
        int fr = f1 + f2;

        ret = deposit64(ret, i, 16, (fr >> 1) | (fr & 1));
    }
    return ret;
}

uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

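/*
 * Parallel shift-and-add: each lane of R1 is shifted left (HSHLADD)
 * or arithmetically right (HSHRADD) by SH bits before the signed
 * saturating add with the corresponding lane of R2.
 */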
uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 << sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 >> sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}
491