xref: /openbmc/qemu/target/hppa/op_helper.c (revision fe1a3ace13a8b53fc20c74fb7e3337f754396e6b)
1 /*
2  * Helpers for HPPA instructions.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/helper-proto.h"
25 #include "accel/tcg/cpu-ldst.h"
26 #include "accel/tcg/probe.h"
27 #include "qemu/timer.h"
28 #include "trace.h"
29 #ifdef CONFIG_USER_ONLY
30 #include "user/page-protection.h"
31 #endif
32 
33 G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp)
34 {
35     CPUState *cs = env_cpu(env);
36 
37     cs->exception_index = excp;
38     cpu_loop_exit(cs);
39 }
40 
41 G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
42 {
43     CPUState *cs = env_cpu(env);
44 
45     cs->exception_index = excp;
46     cpu_loop_exit_restore(cs, ra);
47 }
48 
49 static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
50                                 uint32_t val, uint32_t mask, uintptr_t ra)
51 {
52     int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
53     uint32_t old, new, cmp, *haddr;
54     void *vaddr;
55 
56     vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
57     if (vaddr == NULL) {
58         cpu_loop_exit_atomic(env_cpu(env), ra);
59     }
60     haddr = (uint32_t *)((uintptr_t)vaddr & -4);
61     mask = addr & 1 ? 0x00ffffffu : 0xffffff00u;
62 
63     old = *haddr;
64     while (1) {
65         new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
66         cmp = qatomic_cmpxchg(haddr, old, new);
67         if (cmp == old) {
68             return;
69         }
70         old = cmp;
71     }
72 }
73 
74 static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
75                                 uint64_t val, uint64_t mask,
76                                 int size, uintptr_t ra)
77 {
78 #ifdef CONFIG_ATOMIC64
79     int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
80     uint64_t old, new, cmp, *haddr;
81     void *vaddr;
82 
83     vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
84     if (vaddr == NULL) {
85         cpu_loop_exit_atomic(env_cpu(env), ra);
86     }
87     haddr = (uint64_t *)((uintptr_t)vaddr & -8);
88 
89     old = *haddr;
90     while (1) {
91         new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
92         cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
93         if (cmp == old) {
94             return;
95         }
96         old = cmp;
97     }
98 #else
99     cpu_loop_exit_atomic(env_cpu(env), ra);
100 #endif
101 }
102 
103 static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
104                       bool parallel, uintptr_t ra)
105 {
106     switch (addr & 3) {
107     case 3:
108         cpu_stb_data_ra(env, addr, val, ra);
109         break;
110     case 2:
111         cpu_stw_data_ra(env, addr, val, ra);
112         break;
113     case 1:
114         /* The 3 byte store must appear atomic.  */
115         if (parallel) {
116             atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
117         } else {
118             cpu_stb_data_ra(env, addr, val >> 16, ra);
119             cpu_stw_data_ra(env, addr + 1, val, ra);
120         }
121         break;
122     default:
123         cpu_stl_data_ra(env, addr, val, ra);
124         break;
125     }
126 }
127 
/*
 * Store Doubleword Bytes, beginning case: store the bytes of VAL from
 * big-endian byte position (addr & 7) through the end of the
 * doubleword.  The multi-byte cases that cannot use a single native
 * store must appear atomic when PARALLEL.
 */
static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        /* Only the final byte (byte 7) is stored.  */
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 6:
        /* Bytes 6-7.  */
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 5:
        /* Bytes 5-7: the 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 4:
        /* Bytes 4-7: one aligned word.  */
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    case 3:
        /* Bytes 3-7: the 5 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
        } else {
            /* Byte 3, then the aligned word holding bytes 4-7.  */
            cpu_stb_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 2:
        /* Bytes 2-7: the 6 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
        } else {
            /* Halfword 2-3, then word 4-7.  */
            cpu_stw_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 2, val, ra);
        }
        break;
    case 1:
        /* Bytes 1-7: the 7 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra);
        } else {
            /* Byte 1, halfword 2-3, word 4-7.  */
            cpu_stb_data_ra(env, addr, val >> 48, ra);
            cpu_stw_data_ra(env, addr + 1, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 3, val, ra);
        }
        break;
    default:
        /* Doubleword aligned: everything is stored.  */
        cpu_stq_data_ra(env, addr, val, ra);
        break;
    }
}
183 
/* STBY, beginning case, non-parallel context.  */
void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_b(env, addr, val, false, GETPC());
}
188 
/* STBY, beginning case, parallel context: partial stores are atomic.  */
void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_b(env, addr, val, true, GETPC());
}
194 
/* STDBY, beginning case, non-parallel context.  */
void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_b(env, addr, val, false, GETPC());
}
199 
/* STDBY, beginning case, parallel context: partial stores are atomic.  */
void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_b(env, addr, val, true, GETPC());
}
205 
206 static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
207                       bool parallel, uintptr_t ra)
208 {
209     switch (addr & 3) {
210     case 3:
211         /* The 3 byte store must appear atomic.  */
212         if (parallel) {
213             atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
214         } else {
215             cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
216             cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
217         }
218         break;
219     case 2:
220         cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
221         break;
222     case 1:
223         cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
224         break;
225     default:
226         /* Nothing is stored, but protection is checked and the
227            cacheline is marked dirty.  */
228         probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
229         break;
230     }
231 }
232 
/*
 * Store Doubleword Bytes, ending case: store the bytes of VAL from the
 * start of the doubleword up to (but not including) big-endian byte
 * position (addr & 7).  The multi-byte cases that cannot use a single
 * native store must appear atomic when PARALLEL.
 */
static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        /* Bytes 0-6: the 7 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 7, val,
                                0xffffffffffffff00ull, 7, ra);
        } else {
            /* Word 0-3, halfword 4-5, byte 6.  */
            cpu_stl_data_ra(env, addr - 7, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 6:
        /* Bytes 0-5: the 6 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 6, val,
                                0xffffffffffff0000ull, 6, ra);
        } else {
            /* Word 0-3, halfword 4-5.  */
            cpu_stl_data_ra(env, addr - 6, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        }
        break;
    case 5:
        /* Bytes 0-4: the 5 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 5, val,
                                0xffffffffff000000ull, 5, ra);
        } else {
            /* Word 0-3, byte 4.  */
            cpu_stl_data_ra(env, addr - 5, val >> 32, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        }
        break;
    case 4:
        /* Bytes 0-3: one aligned word.  */
        cpu_stl_data_ra(env, addr - 4, val >> 32, ra);
        break;
    case 3:
        /* Bytes 0-2: the 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val >> 32, 0xffffff00u, ra);
        } else {
            /* Halfword 0-1, byte 2.  */
            cpu_stw_data_ra(env, addr - 3, val >> 48, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 40, ra);
        }
        break;
    case 2:
        /* Bytes 0-1.  */
        cpu_stw_data_ra(env, addr - 2, val >> 48, ra);
        break;
    case 1:
        /* Byte 0 only.  */
        cpu_stb_data_ra(env, addr - 1, val >> 56, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty.  */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}
293 
/* STBY, ending case, non-parallel context.  */
void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_e(env, addr, val, false, GETPC());
}
298 
/* STBY, ending case, parallel context: partial stores are atomic.  */
void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_e(env, addr, val, true, GETPC());
}
304 
/* STDBY, ending case, non-parallel context.  */
void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_e(env, addr, val, false, GETPC());
}
309 
/* STDBY, ending case, parallel context: partial stores are atomic.  */
void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_e(env, addr, val, true, GETPC());
}
315 
316 void HELPER(ldc_check)(target_ulong addr)
317 {
318     if (unlikely(addr & 0xf)) {
319         qemu_log_mask(LOG_GUEST_ERROR,
320                       "Undefined ldc to unaligned address mod 16: "
321                       TARGET_FMT_lx "\n", addr);
322     }
323 }
324 
/*
 * PROBE: return non-zero if an access with rights WANT at privilege
 * LEVEL would succeed for ADDR, zero otherwise.  In system mode a
 * failed translation (other than a plain protection miss) raises the
 * corresponding fault instead of returning.
 */
target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
                          uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
    return page_check_range(addr, 1, want);
#else
    int prot, excp, mmu_idx;
    hwaddr phys;

    trace_hppa_tlb_probe(addr, level, want);
    /* Fail if the requested privilege level is higher than current.  */
    if (level < (env->iaoq_f & 3)) {
        return 0;
    }

    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, 0, &phys, &prot);
    if (excp >= 0) {
        /* Translation failed: unwind guest state, record the faulting
           address, and deliver the (non-access) fault.  */
        cpu_restore_state(env_cpu(env), GETPC());
        hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        helper_excp(env, excp);
    }
    return (want & prot) != 0;
#endif
}
353 
/* Read the architectural interval timer (CR16).  */
target_ulong HELPER(read_interval_timer)(void)
{
#ifdef CONFIG_USER_ONLY
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks.  */
    return cpu_get_host_ticks();
#else
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the cr16,
       present it with a well-timed clock fixed at 250MHz.  */
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
#endif
}
367 
368 uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2)
369 {
370     uint64_t ret = 0;
371 
372     for (int i = 0; i < 64; i += 16) {
373         int f1 = sextract64(r1, i, 16);
374         int f2 = sextract64(r2, i, 16);
375         int fr = f1 + f2;
376 
377         fr = MIN(fr, INT16_MAX);
378         fr = MAX(fr, INT16_MIN);
379         ret = deposit64(ret, i, 16, fr);
380     }
381     return ret;
382 }
383 
384 uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2)
385 {
386     uint64_t ret = 0;
387 
388     for (int i = 0; i < 64; i += 16) {
389         int f1 = extract64(r1, i, 16);
390         int f2 = sextract64(r2, i, 16);
391         int fr = f1 + f2;
392 
393         fr = MIN(fr, UINT16_MAX);
394         fr = MAX(fr, 0);
395         ret = deposit64(ret, i, 16, fr);
396     }
397     return ret;
398 }
399 
400 uint64_t HELPER(havg)(uint64_t r1, uint64_t r2)
401 {
402     uint64_t ret = 0;
403 
404     for (int i = 0; i < 64; i += 16) {
405         int f1 = extract64(r1, i, 16);
406         int f2 = extract64(r2, i, 16);
407         int fr = f1 + f2;
408 
409         ret = deposit64(ret, i, 16, (fr >> 1) | (fr & 1));
410     }
411     return ret;
412 }
413 
414 uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2)
415 {
416     uint64_t ret = 0;
417 
418     for (int i = 0; i < 64; i += 16) {
419         int f1 = sextract64(r1, i, 16);
420         int f2 = sextract64(r2, i, 16);
421         int fr = f1 - f2;
422 
423         fr = MIN(fr, INT16_MAX);
424         fr = MAX(fr, INT16_MIN);
425         ret = deposit64(ret, i, 16, fr);
426     }
427     return ret;
428 }
429 
430 uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2)
431 {
432     uint64_t ret = 0;
433 
434     for (int i = 0; i < 64; i += 16) {
435         int f1 = extract64(r1, i, 16);
436         int f2 = sextract64(r2, i, 16);
437         int fr = f1 - f2;
438 
439         fr = MIN(fr, UINT16_MAX);
440         fr = MAX(fr, 0);
441         ret = deposit64(ret, i, 16, fr);
442     }
443     return ret;
444 }
445 
446 uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh)
447 {
448     uint64_t ret = 0;
449 
450     for (int i = 0; i < 64; i += 16) {
451         int f1 = sextract64(r1, i, 16);
452         int f2 = sextract64(r2, i, 16);
453         int fr = (f1 << sh) + f2;
454 
455         fr = MIN(fr, INT16_MAX);
456         fr = MAX(fr, INT16_MIN);
457         ret = deposit64(ret, i, 16, fr);
458     }
459     return ret;
460 }
461 
462 uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t sh)
463 {
464     uint64_t ret = 0;
465 
466     for (int i = 0; i < 64; i += 16) {
467         int f1 = sextract64(r1, i, 16);
468         int f2 = sextract64(r2, i, 16);
469         int fr = (f1 >> sh) + f2;
470 
471         fr = MIN(fr, INT16_MAX);
472         fr = MAX(fr, INT16_MIN);
473         ret = deposit64(ret, i, 16, fr);
474     }
475     return ret;
476 }
477