xref: /openbmc/qemu/target/s390x/tcg/excp_helper.c (revision 1580b897)
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

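/*
 * Unwind the guest state to the instruction that triggered the exception
 * (RA is the host return address into the TCG-generated code, consumed by
 * cpu_restore_state()), record the program-interruption code and leave
 * the CPU loop.  Never returns.
 */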
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

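/*
 * Raise a PGM_DATA program interrupt.  The data-exception code (DXC) is
 * made available to the guest in the lowcore and, if the AFP-register
 * control in CR0 is set, also in byte 2 of the FPC.
 */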
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

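/*
 * Raise a PGM_VECTOR_PROCESSING program interrupt.  The vector-exception
 * code (VXC) shares the lowcore and FPC locations with the DXC; unlike
 * the DXC, it is stored regardless of the AFP-register control.
 */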
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

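/*
 * Map a QEMU MMU index back to the address-space-control (ASC) bits of
 * the PSW mask, for feeding into mmu_translate().
 */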
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

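/*
 * Handle a TLB miss: translate ADDRESS via DAT (or via real-address
 * translation for MMU_REAL_IDX), check that the result is backed by
 * memory, and either install the mapping in the QEMU TLB or raise the
 * appropriate program exception.  Returns false only for non-faulting
 * probes; on failure with probe == false it does not return at all.
 */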
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    /* check out of RAM access */
    if (!excp &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ms->ram_size);
        excp = PGM_ADDRESSING;
        tec = 0; /* unused */
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

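/*
 * Deliver a pending program interrupt: advance the PSW past the faulting
 * instruction for the exception codes that are not nullifying, merge any
 * pending PER event into the interruption code, store the old PSW and
 * interruption parameters into the lowcore, and load the program-new PSW.
 */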
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

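/*
 * Deliver a SUPERVISOR CALL interrupt: store the SVC number, ILC and the
 * old PSW (pointing past the SVC instruction) into the lowcore and load
 * the svc-new PSW.  A pending PER event is delivered immediately after.
 */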
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

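/*
 * Deliver the highest-priority pending external interrupt that is enabled
 * by its subclass mask in CR0, in the order checked below: emergency
 * signal, external call, clock comparator, CPU timer, service signal.
 */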
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

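/*
 * Deliver a pending I/O interrupt: dequeue the highest-priority I/O
 * interrupt allowed by the subclass masks in CR6 from the FLIC, store
 * its parameters into the lowcore, and swap the PSWs.
 */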
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

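/*
 * Store the 32 vector registers in big-endian format into the
 * machine-check extended save area (MCESA) at MCESAO.  Returns -EFAULT
 * if the area cannot be mapped in full.
 */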
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

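/*
 * Deliver a floating channel-report machine check: dequeue it from the
 * FLIC, store the architected register save areas and the machine-check
 * interruption code (MCIC) into the lowcore, and load the mcck-new PSW.
 */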
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

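/*
 * Top-level interrupt delivery: starting from the current
 * exception_index, deliver pending interrupts in priority order
 * (machine check, external, I/O, restart, stop), looping until no
 * further deliverable interrupt remains.
 */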
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

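/*
 * cpu_exec() hook: deliver a hard interrupt if one is pending and
 * deliverable.  Delivery is suppressed while an EXECUTE target is in
 * flight, since the target instruction must complete indivisibly with
 * the EXECUTE itself.
 */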
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is
           the current default ASC.  This turns out to be true except
           when the MVCP and MVCS instructions are used.  */
        /* move the ASC bits of the PSW mask down into the ATMID AS field */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered; it will call s390_cpu_set_psw, which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

static void QEMU_NORETURN monitor_event(CPUS390XState *env,
                                        uint64_t monitor_code,
                                        uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

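/*
 * MONITOR CALL: a monitor event is only recognized when the monitor-mask
 * bit in CR8 that corresponds to the monitor class is one (class 0 maps
 * to bit 0x8000 of the low halfword); otherwise the call is a no-op.
 */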
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */