/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

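/*
 * Raise a program interrupt from translated code: restore the guest
 * state for the instruction at 'ra', record the program-interruption
 * code and leave the CPU loop so the exception gets delivered.
 */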
G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

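/*
 * Raise a data exception (PGM_DATA).  The data-exception code (DXC)
 * identifies the cause; it is stored in the lowcore and, if the
 * AFP-register control in CR0 is set, also in the DXC field of the
 * floating-point-control register.
 */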
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

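/*
 * Raise a vector-processing exception (PGM_VECTOR_PROCESSING).  The
 * vector-exception code (VXC) shares the DXC locations in the lowcore
 * and in the floating-point-control register.
 */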
G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic and relative long operations, for which we want
 * to raise a specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

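/*
 * Record a host SIGSEGV on a guest access as the matching program
 * exception: PGM_ADDRESSING for a missing mapping, PGM_PROTECTION
 * otherwise.
 */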
void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * C.f. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

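/* Map a TCG MMU index to the corresponding address-space-control (ASC). */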
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

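/*
 * TLB fill hook: translate the access through the s390x MMU (or through
 * real-address handling for MMU_REAL_IDX) and install the resulting
 * page mapping.  On a translation exception, either report failure to a
 * probing caller or deliver the corresponding program exception.
 */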
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

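/*
 * Deliver a pending program interrupt: advance the PSW past the
 * instruction where required, store the interruption code, instruction
 * length and old PSW into the lowcore, and load the program-new PSW.
 */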
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
           ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

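/*
 * Deliver a SUPERVISOR CALL interrupt: store the SVC interruption code,
 * instruction length and old PSW into the lowcore, then load the
 * svc-new PSW.
 */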
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

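/*
 * Deliver the highest-priority pending external interruption (emergency
 * signal, external call, clock comparator, CPU timer or service signal),
 * honouring the subclass mask bits in CR0, then load the external-new PSW.
 */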
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

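/*
 * Deliver a pending I/O interruption: dequeue it from the FLIC, store
 * the subchannel ID/number and the interruption parameters into the
 * lowcore, then load the I/O-new PSW.
 */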
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

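/*
 * Machine-check extended save area (MCESA).  The 32 vector registers
 * occupy the first 512 bytes; the rest of the 1024-byte area is unused
 * here.
 */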
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

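/* Store the 32 vector registers into the extended save area at mcesao. */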
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

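/*
 * Deliver a channel-report machine-check interruption: save the
 * architectural register state and the machine-check interruption code
 * (MCIC) into the lowcore, then load the mcck-new PSW.
 */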
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

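/*
 * Deliver whatever made us exit the CPU loop, then keep delivering any
 * further pending interruptions in descending priority order (machine
 * check, external, I/O, restart, stop) until none remain deliverable.
 */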
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

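/*
 * cpu-exec hook for hard interrupts.  Delivery is suppressed while an
 * EXECUTE instruction is in flight, because the target instruction must
 * complete indivisibly with its parent.
 */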
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

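/*
 * Debug exception hook: turn a hit CPU watchpoint into a PER
 * storage-alteration event.
 */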
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now, just assume it is the
           current default ASC.  This holds except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered, it will call s390_cpu_set_psw which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

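/* MMU hook for accesses diagnosed as unaligned with MO_ALIGN (see above). */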
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

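/*
 * Raise a monitor event (PGM_MONITOR): store the monitor code and the
 * monitor class number into the lowcore, then deliver the program
 * interrupt.  Called by the MONITOR CALL helper below when the class is
 * enabled in CR8.
 */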
static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */