xref: /openbmc/qemu/target/s390x/tcg/misc_helper.c (revision da3c22c7)
/*
 *  S/390 misc helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB.  */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care, as it is up to the OS to decide when to process
     * that interrupt and reset the timer to > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
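    /*
     * env->cputm holds the CPU timer expiry deadline on QEMU_CLOCK_VIRTUAL;
     * return the remaining time converted to TOD format.
     */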
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

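    /*
     * The TOD epoch is 1900-01-01; TOD_UNIX_EPOCH is the TOD clock value
     * at the Unix epoch, so add the converted host time to it.
     */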
    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}

#ifndef CONFIG_USER_ONLY
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    qemu_mutex_unlock_iothread();
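    /* negative return values are program interruption codes to be injected */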
    if (r < 0) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
    return r;
}

void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3, GETPC());
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    const uint32_t prefix = a1 & 0x7fffe000;
    const uint32_t old_prefix = env->psa;
    CPUState *cs = env_cpu(env);

    if (prefix == old_prefix) {
        return;
    }
    /*
     * Since the prefix is aligned to 8k and memory increments are a multiple
     * of 8k, checking the first page is sufficient.
     */
    if (!mmu_absolute_addr_valid(prefix, true)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
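    /*
     * Prefixing swaps absolute addresses 0..8k with the 8k prefix area, so
     * flush the TLB entries for both the old and the new mapping.
     */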
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
    if (prefix != 0) {
        tlb_flush_page(cs, prefix);
        tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
    }
    if (old_prefix != 0) {
        tlb_flush_page(cs, old_prefix);
        tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
    }
}

static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(qemu_mutex_iothread_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /*
     * The TOD has to exceed the CKC; this can never happen if the CKC is
     * all 1's.
     */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

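    /*
     * "time" is now the QEMU_CLOCK_VIRTUAL value (in ns) at which the TOD
     * reaches the CKC.
     */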
    timer_mod(env->tod_timer, time);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    qemu_mutex_lock_iothread();
    update_ckc_timer(env);
    qemu_mutex_unlock_iothread();
}

void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    S390CPU *cpu = S390_CPU(cs);

    update_ckc_timer(&cpu->env);
}

/* Set Clock */
uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
{
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod = {
        .high = 0,
        .low = tod_low,
    };

    qemu_mutex_lock_iothread();
    tdc->set(td, &tod, &error_abort);
    qemu_mutex_unlock_iothread();
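    /* condition code 0: clock value set */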
    return 0;
}

/* Set TOD Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

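    /* bits 32-47 of r0 must be zero; bits 48-63 are the programmable field */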
    if (val & 0xffff0000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    if (a0 & ~TARGET_PAGE_MASK) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have an SCLP line-mode console (which is always present on
     * LPAR, but not the default for QEMU), therefore not displaying boot
     * messages and making booting a Linux kernel under TCG harder.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     *       and change the defaults (line-mode console) based on machine
     *       type and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* The Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strpadcpy((char *)sysib.sysib_322.ext_names[0],
                          sizeof(sysib.sysib_322.ext_names[0]),
                          qemu_name, '\0');

            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}

uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_sal(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

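    /* the second-operand address must be word aligned */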
    if (addr & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    qemu_mutex_lock_iothread();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        qemu_mutex_unlock_iothread();
        return 0;
    }

    if (addr) {
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            qemu_mutex_unlock_iothread();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    qemu_mutex_unlock_iothread();
    return 1;
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
void HELPER(per_check_exception)(CPUS390XState *env)
{
    if (env->per_perc_atmid) {
        tcg_s390_program_interrupt(env, PGM_PER, GETPC());
    }
}

/* Check if an address is within the PER starting address and the PER
   ending address.  The address range might loop.  */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = env_cpu(env);

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}

void HELPER(per_store_real)(CPUS390XState *env)
{
    if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
#endif

static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care, the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
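    /* used_stfl_bytes counts up to and including the last non-zero byte */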
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
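    /* STFL stores only facility bits 0-31 (the lowcore field is 4 bytes) */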
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
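    /* bits 56-63 of r0 hold the number of doublewords to store, minus one */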
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    if (addr & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored.  However, existing hardware appears
     * not to store the words, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

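    /* report back in the low byte of r0 the doublewords needed, minus one */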
    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}

#ifndef CONFIG_USER_ONLY
/*
 * Note: we ignore any return code of the functions called for the PCI
 * instructions, as the only time they return !0 is when the stub is
 * called, and in that case we didn't even offer the zpci facility.
 * The only exception is SIC, where program checks need to be handled
 * by the caller.
 */
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    clp_service_call(cpu, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcilg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                     uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    int r;

    qemu_mutex_lock_iothread();
    r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
    qemu_mutex_unlock_iothread();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
}

void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    rpcit_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
                    uint64_t gaddr, uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                    uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif