xref: /openbmc/qemu/target/s390x/tcg/misc_helper.c (revision 2a8af382)
/*
 *  S/390 misc helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB.  */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care, as it is up to the OS when to process that
     * interrupt and reset the timer to a value > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}

#ifndef CONFIG_USER_ONLY
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    qemu_mutex_unlock_iothread();
    if (r < 0) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
    return r;
}

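/*
 * DIAGNOSE: dispatch the DIAG function codes implemented under TCG
 * (0x500 virtio hypercall, 0x44 yield, 0x308 IPL, 0x288 watchdog);
 * unsupported codes and failures end up as a specification exception.
 */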
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3, GETPC());
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    const uint32_t prefix = a1 & 0x7fffe000;
    const uint32_t old_prefix = env->psa;
    CPUState *cs = env_cpu(env);

    if (prefix == old_prefix) {
        return;
    }
    /*
     * Since the prefix is aligned to 8k and memory increments are a
     * multiple of 8k, checking the first page is sufficient.
     */
    if (!mmu_absolute_addr_valid(prefix, true)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
    if (prefix != 0) {
        tlb_flush_page(cs, prefix);
        tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
    }
    if (old_prefix != 0) {
        tlb_flush_page(cs, old_prefix);
        tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
    }
}

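/*
 * (Re-)arm the clock comparator timer: the deadline is the CKC, converted
 * from TOD format to nanoseconds relative to the TOD base. If the CKC is
 * all 1's, the timer stays cancelled.
 */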
static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(qemu_mutex_iothread_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* The TOD has to exceed the CKC; impossible if the CKC is all 1's */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, time);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    qemu_mutex_lock_iothread();
    update_ckc_timer(env);
    qemu_mutex_unlock_iothread();
}

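/*
 * Callback with run_on_cpu() signature, invoked on each vCPU after the TOD
 * clock has been changed so that the clock comparator timer is re-armed
 * against the new TOD value.
 */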
void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    S390CPU *cpu = S390_CPU(cs);

    update_ckc_timer(&cpu->env);
}

/* Set Clock */
uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
{
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod = {
        .high = 0,
        .low = tod_low,
    };

    qemu_mutex_lock_iothread();
    tdc->set(td, &tod, &error_abort);
    qemu_mutex_unlock_iothread();
    return 0;
}

/* Set TOD Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

    if (val & 0xffff0000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    if (a0 & ~TARGET_PAGE_MASK) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have an SCLP line-mode console (which is always present on
     * LPAR, but not the default for QEMU), and therefore not display boot
     * messages, making it harder to boot a Linux kernel under TCG.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     *       and change defaults (line-mode console) based on machine type
     *       and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strpadcpy((char *)sysib.sysib_322.ext_names[0],
                          sizeof(sysib.sysib_322.ext_names[0]),
                          qemu_name, '\0');

            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}

uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts  - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
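/*
 * Channel I/O instructions (XSCH, CSCH, HSCH, MSCH, RCHP, RSCH, SAL, SCHM,
 * SSCH, STCRW, STSCH, TPI, TSCH, CHSC): thin wrappers that forward to the
 * ioinst_handle_*() implementations while holding the iothread mutex, as
 * they operate on channel subsystem state.
 */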
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_sal(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

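/*
 * TPI (Test Pending Interruption): dequeue a pending I/O interruption from
 * the FLIC and store the interruption code either at the supplied address
 * or, if the address is zero, in the lowcore. Returns 1 if an interruption
 * was stored, 0 otherwise.
 */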
uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

    if (addr & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    qemu_mutex_lock_iothread();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        qemu_mutex_unlock_iothread();
        return 0;
    }

    if (addr) {
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            qemu_mutex_unlock_iothread();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    qemu_mutex_unlock_iothread();
    return 1;
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
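/*
 * PER (Program Event Recording) helpers: called from translated code to
 * record branch, instruction-fetch and store-using-real-address events
 * according to the controls in control registers 9-11, and to deliver the
 * resulting PER program interruption.
 */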
void HELPER(per_check_exception)(CPUS390XState *env)
{
    if (env->per_perc_atmid) {
        tcg_s390_program_interrupt(env, PGM_PER, GETPC());
    }
}

/* Check if an address is between the PER starting address and the PER
   ending address.  The address range might wrap around.  */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

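/* PER successful-branching event */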
void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

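/* PER instruction-fetch event */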
void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = env_cpu(env);

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}

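/* PER store-using-real-address event */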
void HELPER(per_store_real)(CPUS390XState *env)
{
    if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
#endif

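/*
 * Cached facility list as stored by STFL/STFLE: stfl_bytes holds the STFL
 * facility block for the current CPU model, used_stfl_bytes the length up
 * to the highest non-zero byte.
 */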
static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care, the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
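/* STFL: store the facility bits into the lowcore stfl_fac_list field */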
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

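/*
 * STFLE (Store Facility List Extended): bits 56-63 of R0 hold the number of
 * requested doublewords minus one, e.g. a value of 1 asks for 2 doublewords
 * (16 bytes). On return that field is updated to the number of doublewords
 * needed minus one, and CC 3 indicates the provided area was too small.
 */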
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    if (addr & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored.  However, existing hardware appears
     * not to store the words, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}

#ifndef CONFIG_USER_ONLY
/*
 * Note: we ignore the return codes of the functions called for the PCI
 * instructions, as the only time they return !0 is when the stub is
 * called, and in that case we didn't even offer the zPCI facility.
 * The only exception is SIC, where program checks need to be handled
 * by the caller.
 */
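/* CLP - Call Logical Processor */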
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    clp_service_call(cpu, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

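/* PCILG - PCI Load */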
void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcilg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

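/* PCISTG - PCI Store */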
void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

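/* STPCIFC - Store PCI Function Controls */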
void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                     uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

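/* SIC - Set Interruption Controls */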
void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    int r;

    qemu_mutex_lock_iothread();
    r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
    qemu_mutex_unlock_iothread();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
}

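/* RPCIT - Refresh PCI Translations */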
void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    rpcit_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

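/* PCISTB - PCI Store Block */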
void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
                    uint64_t gaddr, uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

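/* MPCIFC - Modify PCI Function Controls */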
void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                    uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif