xref: /openbmc/qemu/target/s390x/tcg/excp_helper.c (revision ffe98631)
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

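/*
 * Raise a program interrupt from TCG-generated code: unwind the guest
 * state from the host return address 'ra', record the interruption code
 * and exit the CPU loop so that the interrupt is delivered.  Never
 * returns.
 */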
G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

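/*
 * Raise a data exception with the given data-exception code (DXC).
 * In system mode the DXC is always stored into the lowcore; it is only
 * copied into byte 2 of the FPC when the AFP-register control in CR0
 * is enabled.
 */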
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

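/*
 * Raise a vector-processing exception with the given vector-exception
 * code (VXC).  The VXC shares the DXC field in the lowcore and the FPC,
 * but unlike the DXC it is stored unconditionally (see the comments
 * below).
 */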
G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore; without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC; without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic operations, for which we want to raise a
 * specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem.  Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * Cf. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

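/*
 * Map the MMU index of a translated access back to the architectural
 * address-space control (ASC) it models.  Real-mode accesses
 * (MMU_REAL_IDX) are handled separately in s390_cpu_tlb_fill().
 */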
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

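/*
 * tlb_fill handler: translate the access and, on success, install the
 * result in the QEMU TLB.  On failure with 'probe' set we merely return
 * false; otherwise the translation-exception code is stored into the
 * lowcore (except for addressing exceptions, for which no TEC is
 * stored) and a program interrupt is raised.
 */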
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

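/*
 * Deliver a pending program interruption: advance the PSW past the
 * instruction for the non-nullifying interruption codes listed below,
 * merge in a pending PER event, store the interruption code, ILC and
 * old PSW into the lowcore and load the program-new PSW.
 */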
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

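/*
 * Deliver a SUPERVISOR CALL interruption: store the SVC interruption
 * code and ILC into the lowcore, save the old PSW (pointing past the
 * SVC instruction) and load the svc-new PSW.
 */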
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

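/*
 * Deliver the pending external interruption with the highest priority
 * whose subclass is enabled in CR0.  The order checked below (emergency
 * signal, external call, clock comparator, CPU timer, service signal)
 * approximates the architected priority; the caller must ensure at
 * least one such interruption is deliverable.
 */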
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

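/*
 * Deliver an I/O interruption: dequeue an I/O interrupt enabled by the
 * subclass mask in CR6, store its identification words into the lowcore
 * and load the io-new PSW.
 */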
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

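/*
 * Layout of the machine-check extended save area: the 32 vector
 * registers (16 bytes each) at offset 0, followed by padding up to the
 * architected 1k size.
 */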
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

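/*
 * Deliver a channel-report machine check: store the register save areas
 * and the machine-check interruption code (MCIC) into the lowcore,
 * store the vector registers into the extended save area if one is
 * available, and load the mcck-new PSW.
 */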
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

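/*
 * Top-level interrupt delivery.  A directly raised exception (program
 * or SVC) is delivered first; otherwise the pending interrupt classes
 * are polled from highest to lowest priority: machine check, external,
 * I/O, restart and finally stop.  Delivery loops until nothing
 * deliverable remains, since each new PSW may re-enable further
 * classes.
 */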
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but none deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

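/*
 * cpu_exec_interrupt hook: deliver one round of hard interrupts if any
 * is currently deliverable.  Delivery is suppressed in the middle of an
 * EXECUTE (env->ex_value set), since the target instruction must
 * complete indivisibly with its parent.
 */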
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds except when the MVCP and
           MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered; it will call s390_cpu_set_psw, which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

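/*
 * Raise a monitor event: the monitor code and the monitor class number
 * are stored into the lowcore before the PGM_MONITOR program
 * interruption is delivered.
 */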
static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

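/*
 * MONITOR CALL: the event is raised only when the monitor-mask bit for
 * the requested class is set in CR8.  With the 16 mask bits held in the
 * low halfword here, class 0 maps to bit 0x8000; e.g. class 3 tests
 * CR8 & 0x1000.
 */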
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */