/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

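/*
 * The data-exception code (DXC) and vector-exception code (VXC) are stored
 * both in the data-exception-code field of the lowcore and in the DXC field
 * of the FPC register; for data exceptions the FPC copy is only made when
 * the AFP-register control in CR0 is set.
 */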
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic and relative long operations, for which we want
 * to raise a specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem.  Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * Cf. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

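/*
 * Translate 'address' for the given MMU index and enter the page into the
 * TLB.  Returns true on success.  On a translation exception, returns false
 * if 'probe' is set; otherwise delivers the program interrupt directly and
 * does not return.
 */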
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

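/*
 * Deliver a program interrupt: advance the PSW past the instruction for
 * non-nullifying exceptions, store the interruption code, ILEN, PER data
 * and translation-exception identification along with the old PSW into
 * the lowcore, and load the program-new PSW.
 */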
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;
    bool set_trans_exc_code = false;
    bool advance = false;

    assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
           ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        advance = !(env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION);
        break;
    case PGM_ASCE_TYPE:
    case PGM_REG_FIRST_TRANS:
    case PGM_REG_SEC_TRANS:
    case PGM_REG_THIRD_TRANS:
    case PGM_SEGMENT_TRANS:
    case PGM_PAGE_TRANS:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        break;
    case PGM_PROTECTION:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        advance = true;
        break;
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        advance = true;
        break;
    }

    /* advance the PSW if our exception is not nullifying */
    if (advance) {
        env->psw.addr += ilen;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    if (set_trans_exc_code) {
        lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

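/*
 * Deliver a supervisor-call interrupt: store the SVC interruption code and
 * the old PSW, advanced past the SVC instruction, into the lowcore and load
 * the SVC-new PSW.  A pending PER event is delivered right afterwards.
 */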
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

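/*
 * Deliver the highest-priority pending external interrupt that is enabled
 * by its subclass-mask bit in CR0: emergency signal, external call, clock
 * comparator, CPU timer, then service signal.
 */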
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

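/*
 * Deliver an I/O interrupt: dequeue from the FLIC a pending I/O
 * interruption that is enabled by the subclass mask in CR6, store its
 * parameters and the old PSW into the lowcore, and load the I/O-new PSW.
 */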
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

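/*
 * Deliver a (floating) channel-report machine check: store the register
 * save areas and the machine-check interruption code (MCIC) into the
 * lowcore, store the vector registers into the machine-check extended
 * save area if one is available, and load the machine-check-new PSW.
 */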
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

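/*
 * Deliver pending interrupts one at a time.  Program and SVC exceptions
 * arrive here via cs->exception_index; otherwise the highest-priority
 * pending interrupt is selected: machine check, external, I/O, restart
 * and finally stop.  Loops until nothing more is deliverable or the CPU
 * was stopped.
 */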
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but none that are deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

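/*
 * Called by the main TCG loop when CPU_INTERRUPT_HARD is pending.  Returns
 * true if an interrupt was delivered, so that execution restarts at the
 * new PSW.
 */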
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now, just assume it is
           the current default ASC.  This turns out to be true except
           when the MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered, it will call s390_cpu_set_psw which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

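/*
 * MONITOR CALL raises a monitor event only when the monitor-mask bit in
 * CR8 that corresponds to the monitor class is one; the masks for classes
 * 0-15 sit in the low halfword of CR8, hence the 0x8000 >> monitor_class
 * test.
 */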
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xf);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */