xref: /openbmc/qemu/target/s390x/helper.c (revision 4a09d0bb)
/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
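
/*
 * Illustrative usage only: with DEBUG_S390 defined, call sites can log like
 *
 *     DPRINTF("%s: pending_int now %x\n", __func__, env->pending_int);
 *
 * With it undefined, the empty do { } while (0) body still swallows the
 * trailing semicolon, so call sites compile unchanged.
 */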


#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif
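
/*
 * The two callbacks above are meant to be registered as QEMUTimer handlers
 * during CPU setup, presumably along these lines (a sketch, not the actual
 * registration site, which lives outside this file):
 *
 *     env->tod_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
 *     env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
 */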
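
/*
 * Create (but do not realize) a CPU from a model string of the form
 * "name" or "name,features".  The feature part is handed to the CPU
 * class' parse_features hook, and only on the first call; later calls
 * reuse the already-parsed features.
 */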
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    static bool features_parsed;
    char *name, *features;
    const char *typename;
    ObjectClass *oc;
    CPUClass *cc;

    name = g_strdup(cpu_model);
    features = strchr(name, ',');
    if (features) {
        features[0] = 0;
        features++;
    }

    oc = cpu_class_by_name(TYPE_S390_CPU, name);
    if (!oc) {
        error_setg(errp, "Unknown CPU definition \'%s\'", name);
        g_free(name);
        return NULL;
    }
    typename = object_class_get_name(oc);

    if (!features_parsed) {
        features_parsed = true;
        cc = CPU_CLASS(oc);
        cc->parse_features(typename, features, errp);
    }
    g_free(name);

    if (*errp) {
        return NULL;
    }
    return S390_CPU(CPU(object_new(typename)));
}

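/*
 * Create and realize a CPU, setting its "id" property.  On any failure
 * the error is propagated, the half-built object is unreferenced and
 * NULL is returned.
 */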
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* The caller must ensure the TB is exited after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

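/*
 * Softmmu TLB-fill hook: translate the faulting page, sanity-check the
 * resulting real address against ram_size, and install the mapping in
 * the TLB.  Returns 0 on success; 1 means a program exception was
 * triggered and the caller has to exit the TB.
 */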
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* Check for an out-of-RAM access: raddr is page-aligned, so a value
       equal to ram_size already lies beyond the end of RAM.  */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

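/*
 * Install a new PSW.  The architectural PSW keeps the condition code in
 * bits 18-19; with bit 0 as the most significant bit of the 64-bit mask,
 * those land at bit positions 45-44, which is why the CC is extracted
 * below as (mask >> 44) & 3 (and re-inserted the same way in
 * get_psw_mask()).
 */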
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

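/*
 * Return the architectural PSW mask.  Under TCG the condition code is
 * tracked lazily, so it is materialized via calc_cc() and folded back
 * into the CC field of the mask first.
 */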
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

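/*
 * Map the guest's lowcore (the prefix area at env->psa) for direct host
 * access; aborts if less than the full LowCore structure could be
 * mapped.  Callers must pair this with cpu_unmap_lowcore().
 */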
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

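/*
 * Deliver a program interrupt: resolve a deferred instruction length
 * (ILEN_LATER/ILEN_LATER_INC) from the opcode, store the code, ilen and
 * old PSW into the lowcore, and branch through the program-new PSW.
 */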
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

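/*
 * Deliver the newest queued external interrupt: store its code and
 * parameters together with the old PSW into the lowcore, pop it off the
 * queue, and branch through the external-new PSW.
 */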
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

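/*
 * Deliver at most one pending I/O interrupt: scan the per-ISC queues,
 * deliver the first entry that is enabled by CR6, and drop the pending
 * INTERRUPT_IO flag only once all queues have drained.
 */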
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

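/*
 * Deliver a machine check (only the CRW-pending type is implemented):
 * dump the register state into the lowcore save areas, store a fixed
 * interruption code, and branch through the mcck-new PSW.
 */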
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

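/*
 * Top-level interrupt dispatcher: if no exception is pending yet, pick
 * one from the pending_int bits in priority order (machine check, then
 * external, then I/O), and deliver it via the handlers above.
 */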
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it into two halves.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC, which holds unless the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints before re-executing the code.  A PER
           exception will be triggered; it will call load_psw(), which
           recomputes the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
#endif /* CONFIG_USER_ONLY */