xref: /openbmc/qemu/target/alpha/helper.c (revision 4889d9666076d8164171d1208ffb8b2be10463f6)
1 /*
2  *  Alpha emulation cpu helpers for qemu.
3  *
4  *  Copyright (c) 2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "cpu.h"
23 #include "exec/cputlb.h"
24 #include "exec/page-protection.h"
25 #include "exec/target_page.h"
26 #include "fpu/softfloat-types.h"
27 #include "exec/helper-proto.h"
28 #include "qemu/qemu-print.h"
29 #include "system/memory.h"
30 #include "qemu/plugin.h"
31 
32 
/*
 * Move the single bit SRC within X to the single bit position DST.
 * SRC and DST must each be one-bit constants; the ratio between them
 * is then a power of two, so the division/multiplication below folds
 * to a constant shift at compile time.  All parameters are fully
 * parenthesized so expression arguments expand safely.
 */
#define CONVERT_BIT(X, SRC, DST) \
    ((SRC) > (DST)                          \
     ? (X) / ((SRC) / (DST)) & (DST)        \
     : ((X) & (SRC)) * ((DST) / (SRC)))
35 
36 uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
37 {
38     return (uint64_t)env->fpcr << 32;
39 }
40 
/*
 * Install a new architectural FPCR value and recompute all cached
 * state derived from it: the raw fpcr image, the exception-enable
 * mask, the dynamic rounding mode, and the flush-to-zero controls.
 */
void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    /* Translate the FPCR dynamic rounding-mode (DYN) field into the
       corresponding softfloat rounding-mode constant.  */
    static const uint8_t rm_map[] = {
        [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
        [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
        [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
        [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
    };

    /* Only the high 32 bits of the architectural FPCR are implemented.  */
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Record the raw value before adjusting for linux-user.  */
    env->fpcr = fpcr;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
    fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);

    /*
     * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
     * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
     * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
     */
    t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
#endif

    /* Gather each per-exception disable bit, repositioned onto the
       corresponding status bit.  */
    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    /* An exception is enabled iff its disable bit is clear.  */
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

    /* Flush denormal results to zero only when underflow traps are
       disabled (UNFD) and underflow-to-zero is requested (UNDZ).  */
    t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
#ifdef CONFIG_USER_ONLY
    /* linux-user: the kernel's software completion mask can also
       request mapping underflows to zero.  */
    t |= (env->swcr & SWCR_MAP_UMZ) != 0;
#endif
    env->fpcr_flush_to_zero = t;
}
91 
/* TCG helper: read the FPCR (see cpu_alpha_load_fpcr).  */
uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}
96 
/* TCG helper: write the FPCR (see cpu_alpha_store_fpcr).  */
void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
101 
102 static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
103 {
104 #ifndef CONFIG_USER_ONLY
105     if (env->flags & ENV_FLAG_PAL_MODE) {
106         if (reg >= 8 && reg <= 14) {
107             return &env->shadow[reg - 8];
108         } else if (reg == 25) {
109             return &env->shadow[7];
110         }
111     }
112 #endif
113     return &env->ir[reg];
114 }
115 
/* Read integer register REG, honoring PALmode shadow registers.  */
uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}
120 
/* Write integer register REG, honoring PALmode shadow registers.  */
void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}
125 
126 #if defined(CONFIG_USER_ONLY)
127 void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
128                               MMUAccessType access_type,
129                               bool maperr, uintptr_t retaddr)
130 {
131     CPUAlphaState *env = cpu_env(cs);
132     target_ulong mmcsr, cause;
133 
134     /* Assuming !maperr, infer the missing protection. */
135     switch (access_type) {
136     case MMU_DATA_LOAD:
137         mmcsr = MM_K_FOR;
138         cause = 0;
139         break;
140     case MMU_DATA_STORE:
141         mmcsr = MM_K_FOW;
142         cause = 1;
143         break;
144     case MMU_INST_FETCH:
145         mmcsr = MM_K_FOE;
146         cause = -1;
147         break;
148     default:
149         g_assert_not_reached();
150     }
151     if (maperr) {
152         if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
153             /* Userspace address, therefore page not mapped. */
154             mmcsr = MM_K_TNV;
155         } else {
156             /* Kernel or invalid address. */
157             mmcsr = MM_K_ACV;
158         }
159     }
160 
161     /* Record the arguments that PALcode would give to the kernel. */
162     env->trap_arg0 = address;
163     env->trap_arg1 = mmcsr;
164     env->trap_arg2 = cause;
165 }
166 #else
/*
 * Translate virtual address ADDR under translation regime MMU_IDX,
 * requiring at least the PAGE_* permissions in PROT_NEED.  On return
 * *PPHYS holds the physical address and *PPROT the page's effective
 * protection bits.  Returns the OSF/1 entMM failure indication, or
 * -1 on success.
 */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = env_cpu(env);
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;    /* default failure: access violation */

    /* Handle physical accesses.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;    /* translation not valid */
        goto exit;
    }
    /* Intermediate levels must be kernel-readable.  */
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    /* The page frame number occupies the high 32 bits of the PTE.  */
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    /* The per-mode read/write enable bits occupy successive PTE bit
       positions, so shifting the kernel-mode bit by mmu_idx selects
       the enable bit for the current mode.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    /* Shifting the PTE right by one aligns its fault-on bits with the
       PAGE_READ/WRITE/EXEC values (guaranteed by the #error above),
       so this clears exactly the faulting permissions.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
290 
291 hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
292 {
293     target_ulong phys;
294     int prot, fail;
295 
296     fail = get_physical_address(cpu_env(cs), addr, 0, 0, &phys, &prot);
297     return (fail >= 0 ? -1 : phys);
298 }
299 
/*
 * TLB fill handler: translate ADDR for ACCESS_TYPE under MMU_IDX and
 * install the mapping.  On failure, either return false (when PROBE)
 * or record the PALcode trap arguments and raise EXCP_MMFAULT.
 */
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUAlphaState *env = cpu_env(cs);
    target_ulong phys;
    int prot, fail;

    /* 1 << access_type turns MMU_DATA_LOAD/STORE/INST_FETCH into the
       PAGE_READ/PAGE_WRITE/PAGE_EXEC protection requirement.  */
    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            /* Probe mode: report failure without raising an exception.  */
            return false;
        }
        /* Record the arguments the PALcode entMM handler expects.  */
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
                          access_type == MMU_DATA_STORE ? 1ull :
                          /* access_type == MMU_INST_FETCH */ -1ull);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
327 
328 void alpha_cpu_do_interrupt(CPUState *cs)
329 {
330     CPUAlphaState *env = cpu_env(cs);
331     int i = cs->exception_index;
332     uint64_t last_pc = env->pc;
333 
334     if (qemu_loglevel_mask(CPU_LOG_INT)) {
335         static int count;
336         const char *name = "<unknown>";
337 
338         switch (i) {
339         case EXCP_RESET:
340             name = "reset";
341             break;
342         case EXCP_MCHK:
343             name = "mchk";
344             break;
345         case EXCP_SMP_INTERRUPT:
346             name = "smp_interrupt";
347             break;
348         case EXCP_CLK_INTERRUPT:
349             name = "clk_interrupt";
350             break;
351         case EXCP_DEV_INTERRUPT:
352             name = "dev_interrupt";
353             break;
354         case EXCP_MMFAULT:
355             name = "mmfault";
356             break;
357         case EXCP_UNALIGN:
358             name = "unalign";
359             break;
360         case EXCP_OPCDEC:
361             name = "opcdec";
362             break;
363         case EXCP_ARITH:
364             name = "arith";
365             break;
366         case EXCP_FEN:
367             name = "fen";
368             break;
369         case EXCP_CALL_PAL:
370             name = "call_pal";
371             break;
372         }
373         qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
374                  PRIx64 " sp=%016" PRIx64 "\n",
375                  ++count, name, env->error_code, cs->cpu_index,
376                  env->pc, env->ir[IR_SP]);
377     }
378 
379     cs->exception_index = -1;
380 
381     switch (i) {
382     case EXCP_RESET:
383         i = 0x0000;
384         break;
385     case EXCP_MCHK:
386         i = 0x0080;
387         break;
388     case EXCP_SMP_INTERRUPT:
389         i = 0x0100;
390         break;
391     case EXCP_CLK_INTERRUPT:
392         i = 0x0180;
393         break;
394     case EXCP_DEV_INTERRUPT:
395         i = 0x0200;
396         break;
397     case EXCP_MMFAULT:
398         i = 0x0280;
399         break;
400     case EXCP_UNALIGN:
401         i = 0x0300;
402         break;
403     case EXCP_OPCDEC:
404         i = 0x0380;
405         break;
406     case EXCP_ARITH:
407         i = 0x0400;
408         break;
409     case EXCP_FEN:
410         i = 0x0480;
411         break;
412     case EXCP_CALL_PAL:
413         i = env->error_code;
414         /* There are 64 entry points for both privileged and unprivileged,
415            with bit 0x80 indicating unprivileged.  Each entry point gets
416            64 bytes to do its job.  */
417         if (i & 0x80) {
418             i = 0x2000 + (i - 0x80) * 64;
419         } else {
420             i = 0x1000 + i * 64;
421         }
422         break;
423     default:
424         cpu_abort(cs, "Unhandled CPU exception");
425     }
426 
427     /* Remember where the exception happened.  Emulate real hardware in
428        that the low bit of the PC indicates PALmode.  */
429     env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);
430 
431     /* Continue execution at the PALcode entry point.  */
432     env->pc = env->palbr + i;
433 
434     /* Switch to PALmode.  */
435     env->flags |= ENV_FLAG_PAL_MODE;
436 
437     switch (i) {
438     case EXCP_SMP_INTERRUPT:
439     case EXCP_CLK_INTERRUPT:
440     case EXCP_DEV_INTERRUPT:
441         qemu_plugin_vcpu_interrupt_cb(cs, last_pc);
442         break;
443     default:
444         qemu_plugin_vcpu_exception_cb(cs, last_pc);
445         break;
446     }
447 }
448 
/*
 * Check for and deliver a pending asynchronous interrupt, honoring
 * the current processor-status interrupt priority level.  Returns
 * true if an interrupt was delivered.
 */
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUAlphaState *env = cpu_env(cs);
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        /* IPL <= 3: device interrupts (and everything below) allowed.  */
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        /* IPL <= 4: timer interrupts allowed.  */
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        /* IPL <= 5: interprocessor interrupts allowed.  */
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        /* Machine checks are only masked at IPL 7.  */
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
491 
492 #endif /* !CONFIG_USER_ONLY */
493 
494 void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
495 {
496     static const char linux_reg_names[31][4] = {
497         "v0",  "t0",  "t1", "t2",  "t3", "t4", "t5", "t6",
498         "t7",  "s0",  "s1", "s2",  "s3", "s4", "s5", "fp",
499         "a0",  "a1",  "a2", "a3",  "a4", "a5", "t8", "t9",
500         "t10", "t11", "ra", "t12", "at", "gp", "sp"
501     };
502     CPUAlphaState *env = cpu_env(cs);
503     int i;
504 
505     qemu_fprintf(f, "PC      " TARGET_FMT_lx " PS      %02x\n",
506                  env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
507     for (i = 0; i < 31; i++) {
508         qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
509                      linux_reg_names[i], cpu_alpha_load_gr(env, i),
510                      (i % 3) == 2 ? '\n' : ' ');
511     }
512 
513     qemu_fprintf(f, "lock_a  " TARGET_FMT_lx " lock_v  " TARGET_FMT_lx "\n",
514                  env->lock_addr, env->lock_value);
515 
516     if (flags & CPU_DUMP_FPU) {
517         for (i = 0; i < 31; i++) {
518             qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
519                          (i % 3) == 2 ? '\n' : ' ');
520         }
521         qemu_fprintf(f, "fpcr    %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
522     }
523     qemu_fprintf(f, "\n");
524 }
525 
/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
G_NORETURN void helper_excp(CPUAlphaState *env, int excp, int error)
{
    CPUState *cs = env_cpu(env);

    /* Record the exception and unwind to the main execution loop.  */
    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}
536 
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.
   When RETADDR is non-zero, guest state (env->pc) is first restored
   from the host return address of the faulting translated code.  */
G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                             int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}
552 
/* Raise an arithmetic trap, recording EXC and MASK in trap_arg0/1
   (presumably the exception summary and register write mask expected
   by the PALcode entArith handler -- see the entMM users above).  */
G_NORETURN void arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                           int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}
560