xref: /openbmc/qemu/target/s390x/tcg/excp_helper.c (revision 8d99713b)
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

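/*
 * Raise a program interrupt: unwind the guest state back to the faulting
 * instruction, log it, record the PGM code via trigger_pgm_exception() and
 * leave the cpu loop.  A minimal usage sketch from a TCG helper (the helper
 * name and the validity check are hypothetical):
 *
 *     void HELPER(example)(CPUS390XState *env, uint64_t operand)
 *     {
 *         if (operand & 1) {
 *             tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
 *         }
 *     }
 */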
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

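/*
 * Raise a data exception.  The data-exception code (DXC) is made visible to
 * the guest in two places: in the lowcore (system mode only) and, when the
 * AFP-register control in CR0 is set, in byte 2 of the floating-point-control
 * register.
 */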
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

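/*
 * Raise a vector-processing exception.  The vector-exception code (VXC)
 * occupies the same field as the DXC, which is why it is stored via the same
 * data_exc_code lowcore offset below.
 */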
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic operations, for which we want to raise a
 * specification exception.
 */
static void QEMU_NORETURN do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * Cf. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

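/*
 * Map a QEMU MMU index back to the address-space-control (ASC) value it
 * models.  Only the translating indexes have an ASC; real-mode accesses
 * (MMU_REAL_IDX) are handled separately in s390_cpu_tlb_fill().
 */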
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

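/*
 * tlb_fill contract (see the accel/tcg hooks): return true after installing
 * a TLB entry; return false if probe == true and the page is not mapped;
 * otherwise raise the program interrupt and never return.
 */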
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

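/*
 * Deliver a program interrupt using the classic s390x mechanism: store the
 * old PSW and interruption parameters into the lowcore, then load the new
 * PSW from the lowcore.  For non-nullifying exceptions the old PSW has to
 * point past the faulting instruction, hence the ilen adjustment below.
 */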
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

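/*
 * Deliver a SUPERVISOR CALL interruption: the same lowcore old/new PSW swap
 * as above, using the svc_* fields.  The old PSW already points past the
 * SVC instruction (psw.addr + ilen).
 */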
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

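/*
 * Deliver one pending external interruption.  The checks below are tried in
 * order, and the first pending source whose subclass is also enabled in CR0
 * wins: emergency signal, external call, clock comparator, CPU timer,
 * service signal.
 */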
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

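/*
 * Deliver an I/O interruption: dequeue one pending I/O interrupt from the
 * floating interrupt controller (FLIC), store its identification words into
 * the lowcore and swap in the I/O new PSW.
 */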
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

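/*
 * Layout of the machine-check extended save area used to store the 32 vector
 * registers (16 bytes each, 512 bytes total), padded up to the architected
 * 1024-byte size.
 */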
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

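/*
 * Deliver a channel-report machine check: dequeue the CRW from the FLIC,
 * store the architected register save areas and the machine-check
 * interruption code (MCIC) into the lowcore, then swap PSWs.
 */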
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

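/*
 * Top-level interrupt delivery.  EXCP_PGM and EXCP_SVC arrive via
 * cs->exception_index; the remaining classes are polled here in descending
 * priority (machine check, external, I/O, restart, stop) and delivered in a
 * loop until nothing deliverable is left.
 */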
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

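/*
 * cpu_exec_interrupt hook: deliver a pending, enabled interrupt if possible.
 * Returns true if an interrupt was delivered, so the main loop restarts
 * translation at the new PSW; false otherwise.
 */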
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered, it will call s390_cpu_set_psw which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

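/*
 * MONITOR CALL support: a monitor event stores the monitor code and the
 * monitor class number into the lowcore and then raises a PGM_MONITOR
 * program interrupt.
 */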
static void QEMU_NORETURN monitor_event(CPUS390XState *env,
                                        uint64_t monitor_code,
                                        uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

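/*
 * A monitor event is only recognized when the class's masking bit in CR8 is
 * set.  Bit 0x8000 corresponds to class 0, so e.g. class 3 tests CR8 against
 * 0x8000 >> 3 == 0x1000.
 */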
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */