/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}
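
/*
 * A minimal sketch (hypothetical helper, for illustration only) of how a
 * TCG helper raises a program interrupt: pass GETPC() as the return
 * address so cpu_restore_state() can recover the guest PSW address from
 * the unwind info before the exception is delivered.
 *
 *     void HELPER(example)(CPUS390XState *env, uint64_t r1)
 *     {
 *         if (r1 & 1) {
 *             tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
 *         }
 *     }
 */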

void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}
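
/*
 * Worked example of the deposit32() call above (values illustrative):
 * deposit32(fpc, 8, 8, dxc) replaces bits 8..15 of the FPC with the DXC,
 * leaving all other FPC bits untouched:
 *
 *     deposit32(0x00000000, 8, 8, 0xff) == 0x0000ff00
 *
 * so the DXC lands in the FPC's DXC byte before PGM_DATA is raised.
 */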

void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * Cf. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}
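
/*
 * Note on the probe contract above (hypothetical caller, for illustration
 * only): with probe=true a failed translation returns false without
 * raising a program interrupt; the caller can then inspect
 * env->tlb_fill_exc and env->tlb_fill_tec, which were stored before the
 * early return:
 *
 *     if (!s390_cpu_tlb_fill(cs, addr, 8, MMU_DATA_STORE, mmu_idx,
 *                            true, 0)) {
 *         excp = env->tlb_fill_exc;   ... handle the failure ...
 *     }
 */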

static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}
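
/*
 * Worked example of the PSW advance above (values illustrative): a
 * privileged-operation exception on a 4-byte instruction at 0x1000
 * stores program_old_psw.addr == 0x1004, i.e. the old PSW points past
 * the offending instruction; a nullifying condition (e.g. PGM_PER with
 * the nullification bit set) leaves psw.addr at 0x1000 instead.
 */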

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SUPERVISOR CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
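
/*
 * For example (illustrative values): SVC 0x23 at address 0x2000 with
 * ilen == 2 stores svc_old_psw.addr == 0x2002, so the interrupted
 * program resumes after the SVC instruction when the handler returns
 * via the old PSW.
 */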

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}
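
/*
 * The order of the if/else chain above is the delivery priority used
 * here for external interruption subclasses: emergency signal, then
 * external call, then clock comparator, then CPU timer, then service
 * signal; each subclass is only considered when its CR0 subclass-mask
 * bit is set.
 */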

static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
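
/*
 * Size check arithmetic: 32 vector registers x 16 bytes == 512 bytes at
 * offset 0x0000, followed by 512 bytes of padding up to 0x0400, so the
 * extended save area is exactly 1024 bytes, matching the
 * QEMU_BUILD_BUG_ON above and the 1K alignment applied to mcesao below.
 */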

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}
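
/*
 * Note on the validity logic above: MCIC_VB_VR is only kept when a
 * usable extended save area exists and the vector registers were
 * actually stored; otherwise the bit is cleared so the guest knows the
 * vector-register save area content is not valid.
 */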

void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
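
/*
 * Sketch of the delivery loop above (illustrative): with both a machine
 * check and an I/O interrupt pending, the first pass delivers EXCP_MCHK;
 * exception_index is then reset to -1 and the goto re-runs the checks,
 * delivering EXCP_IO on the second pass, until nothing deliverable
 * remains or a STOP interrupt ends the chain.
 */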

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered; it will call s390_cpu_set_psw, which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

static void QEMU_NORETURN monitor_event(CPUS390XState *env,
                                        uint64_t monitor_code,
                                        uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}
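
/*
 * Worked example of the CR8 mask check above (illustrative values):
 * monitor class 3 tests bit 0x8000 >> 3 == 0x1000, so with
 * env->cregs[8] == 0x1000 only class-3 MONITOR CALLs raise PGM_MONITOR;
 * all other classes fall through without an event.
 */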

#endif /* !CONFIG_USER_ONLY */