/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

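/*
 * Raise a program interrupt with the given code: restore the guest state
 * for the faulting instruction, record the code for later delivery, and
 * leave the cpu loop.  Never returns.
 */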
G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

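/*
 * Raise a DATA program interrupt.  The data-exception code (DXC) is
 * written to the lowcore and, when AFP is enabled in CR0, also to the
 * DXC field of the FPC (byte 1, hence the deposit at bit 8).
 */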
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

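/*
 * Raise a VECTOR PROCESSING program interrupt.  The vector-exception code
 * (VXC) shares the DXC location in the lowcore and the FPC, and is stored
 * unconditionally: with AFP disabled the value is undefined anyway.
 */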
G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore; without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC; without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic and relative long operations, for which we want
 * to raise a specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    tcg_s390_program_interrupt(cpu_env(cs), PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem.  Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * Cf. the construction of the TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

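/* Map a translation MMU index back to its PSW address-space-control value. */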
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

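/*
 * Softmmu TLB fill: translate the virtual address, install the mapping on
 * success, and otherwise either report the miss to a probing caller or
 * raise the program exception reported by the MMU code.
 */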
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    CPUS390XState *env = cpu_env(cs);
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

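/*
 * Deliver a program interrupt: advance the PSW past the instruction for
 * non-nullifying exceptions, fill in the program-interruption fields of
 * the lowcore, and load the program-new PSW.
 */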
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;
    bool set_trans_exc_code = false;
    bool advance = false;

    assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
           ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        advance = !(env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION);
        break;
    case PGM_ASCE_TYPE:
    case PGM_REG_FIRST_TRANS:
    case PGM_REG_SEC_TRANS:
    case PGM_REG_THIRD_TRANS:
    case PGM_SEGMENT_TRANS:
    case PGM_PAGE_TRANS:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        break;
    case PGM_PROTECTION:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        advance = true;
        break;
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        advance = true;
        break;
    }

    /* advance the PSW if our exception is not nullifying */
    if (advance) {
        env->psw.addr += ilen;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    if (set_trans_exc_code) {
        lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

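/*
 * Deliver a supervisor-call interrupt: store the SVC code and ILC into
 * the lowcore, load the SVC-new PSW, and deliver a pending PER event
 * right behind it.
 */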
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

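/*
 * Deliver the pending external interrupt with the highest priority; the
 * order of the checks below (emergency signal, external call, clock
 * comparator, CPU timer, service signal) encodes that priority, and each
 * source is only considered when its subclass mask bit in CR0 is set.
 */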
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

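/*
 * Deliver one I/O interrupt enabled by the ISC mask in CR6 and load the
 * I/O-new PSW.  The caller has already checked that one is pending.
 */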
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

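/*
 * Extended machine-check save area: the 32 vector registers (128 bits
 * each), padded up to the 1 KiB size enforced by the build assert below.
 */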
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

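/*
 * Deliver a channel-report machine check: dequeue it from the FLIC,
 * save the register state into the lowcore (and the vector registers
 * into the extended save area, if one is available), then load the
 * machine-check-new PSW.
 */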
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

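/*
 * Deliver the exception recorded in cs->exception_index, if any, then
 * loop and deliver any further pending interrupts in descending priority:
 * machine check, external, I/O, RESTART and finally STOP.
 */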
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but none that are deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered, it will call s390_cpu_set_psw which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

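/*
 * MONITOR CALL: the event is only recognized when the monitor-mask bit
 * for the requested class (0-15) is set in CR8; 0x8000 >> class selects
 * the matching bit among the 16 mask bits tested here.
 */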
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xf);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */