/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

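/*
 * Raise a program interruption: restore the guest state to the faulting
 * instruction, queue the interruption and exit the CPU loop.
 */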
G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

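/*
 * Raise a PGM_DATA program interruption, making the data-exception
 * code (DXC) visible in the lowcore and, with AFP enabled, in the FPC.
 */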
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

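/*
 * Raise a PGM_VECTOR_PROCESSING program interruption; the vector-exception
 * code (VXC) is stored in the same locations as the DXC.
 */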
G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore; without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC; without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic and relative long operations, for which we want
 * to raise a specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

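/* User-only emulation never delivers interrupts; just clear the exception. */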
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * Cf. the construction of the TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

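/* Map an MMU index to the PSW address-space-control (ASC) value it models. */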
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

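/*
 * Translate @address for @mmu_idx and enter the mapping into the TLB.
 * On translation failure, return false if @probe is set, otherwise
 * deliver the program exception reported by the MMU.
 */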
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

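/*
 * Deliver a program interruption through the lowcore: advance the PSW
 * past the instruction where required, store the interruption code and
 * old PSW, and load the program-new PSW.
 */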
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;
    bool set_trans_exc_code = false;
    bool advance = false;

    assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
           ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        advance = !(env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION);
        break;
    case PGM_ASCE_TYPE:
    case PGM_REG_FIRST_TRANS:
    case PGM_REG_SEC_TRANS:
    case PGM_REG_THIRD_TRANS:
    case PGM_SEGMENT_TRANS:
    case PGM_PAGE_TRANS:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        break;
    case PGM_PROTECTION:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        advance = true;
        break;
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        advance = true;
        break;
    }

    /* advance the PSW if our exception is not nullifying */
    if (advance) {
        env->psw.addr += ilen;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    if (set_trans_exc_code) {
        lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

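/*
 * Deliver a SUPERVISOR CALL interruption through the lowcore, followed
 * by a pending PER event if there is one.
 */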
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

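/*
 * Deliver the highest-priority pending external interruption that is
 * enabled by the subclass masks in CR0, then load the external-new PSW.
 */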
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

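/*
 * Dequeue an I/O interruption enabled by the subclass masks in CR6 and
 * deliver it through the lowcore.
 */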
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

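/* Extended save area used to store the vector registers on a machine check */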
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

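/* Store all 32 vector registers into the extended save area at @mcesao. */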
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

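/*
 * Deliver a channel-report machine check: store the register state and
 * the machine-check interruption code (MCIC) into the lowcore and load
 * the machine-check-new PSW.
 */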
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

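/*
 * Deliver the exception flagged in @cs, then keep delivering any
 * remaining pending interruptions in order of priority (machine check,
 * external, I/O, restart, stop) until none is deliverable.
 */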
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

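/*
 * Called by TCG when a hard interrupt is pending: deliver it, unless we
 * are in the middle of an EXECUTE, in which case delivery must wait.
 */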
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

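/*
 * Translate a hit on a CPU watchpoint into a PER storage-alteration
 * event, then re-execute the code with the watchpoints removed.
 */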
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is
           the current default ASC.  This holds true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered, it will call s390_cpu_set_psw, which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

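/*
 * Raise a PGM_MONITOR program interruption, storing the monitor code
 * and class number into the lowcore.
 */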
static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

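/*
 * MONITOR CALL: the event is recognized only when the monitor-mask bit
 * for @monitor_class in CR8 is set.
 */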
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xf);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */