xref: /openbmc/qemu/target/s390x/tcg/misc_helper.c (revision 7682ecd4)
/*
 *  S/390 misc helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB.  */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care, as it is up to the OS to decide when to process
     * that interrupt and to reset the timer to a value > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}

#ifndef CONFIG_USER_ONLY
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    bql_lock();
    int r = sclp_service_call(env_archcpu(env), r1, r2);
    bql_unlock();
    if (r < 0) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
    return r;
}

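/* DIAGNOSE */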
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        bql_lock();
        r = s390_virtio_hypercall(env);
        bql_unlock();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        bql_lock();
        handle_diag_308(env, r1, r3, GETPC());
        bql_unlock();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    const uint32_t prefix = a1 & 0x7fffe000;
    const uint32_t old_prefix = env->psa;
    CPUState *cs = env_cpu(env);

    if (prefix == old_prefix) {
        return;
    }
    /*
     * Since the prefix is aligned to 8k and memory increments are a multiple
     * of 8k, checking the first page is sufficient.
     */
    if (!mmu_absolute_addr_valid(prefix, true)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
    if (prefix != 0) {
        tlb_flush_page(cs, prefix);
        tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
    }
    if (old_prefix != 0) {
        tlb_flush_page(cs, old_prefix);
        tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
    }
}

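/* Re-arm the clock comparator timer according to the current CKC value */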
static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(bql_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* The TOD has to exceed the CKC; impossible if the CKC is all 1's */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, time);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    bql_lock();
    update_ckc_timer(env);
    bql_unlock();
}

void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    update_ckc_timer(cpu_env(cs));
}

/* Set Clock */
uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
{
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod = {
        .high = 0,
        .low = tod_low,
    };

    bql_lock();
    tdc->set(td, &tod, &error_abort);
    bql_unlock();
    return 0;
}

/* Set Tod Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

    if (val & 0xffff0000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    if (a0 & ~TARGET_PAGE_MASK) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have an SCLP linemode console (which is always present on
     * LPAR, but not the default for QEMU), therefore not displaying boot
     * messages and making booting a Linux kernel under TCG harder.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     *       and change defaults (linemode console) based on machine type
     *       and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strpadcpy((char *)sysib.sysib_322.ext_names[0],
                          sizeof(sysib.sysib_322.ext_names[0]),
                          qemu_name, '\0');

            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}

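/* Signal Processor */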
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts  - push further down */
    bql_lock();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    bql_unlock();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
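/* Cancel Subchannel */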
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_xsch(cpu, r1, GETPC());
    bql_unlock();
}

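/* Clear Subchannel */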
void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_csch(cpu, r1, GETPC());
    bql_unlock();
}

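/* Halt Subchannel */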
void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_hsch(cpu, r1, GETPC());
    bql_unlock();
}

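/* Modify Subchannel */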
void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    bql_unlock();
}

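/* Reset Channel Path */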
void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_rchp(cpu, r1, GETPC());
    bql_unlock();
}

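/* Resume Subchannel */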
void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_rsch(cpu, r1, GETPC());
    bql_unlock();
}

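/* Set Address Limit */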
void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    ioinst_handle_sal(cpu, r1, GETPC());
    bql_unlock();
}

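/* Set Channel Monitor */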
void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    bql_unlock();
}

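/* Start Subchannel */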
void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    bql_unlock();
}

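/* Store Channel Report Word */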
void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    bql_unlock();
}

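/* Store Subchannel */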
void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    bql_unlock();
}

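/* Test Pending Interruption */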
uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

    if (addr & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    bql_lock();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        bql_unlock();
        return 0;
    }

    if (addr) {
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            bql_unlock();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    bql_unlock();
    return 1;
}

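/* Test Subchannel */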
void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    bql_unlock();
}

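/* Channel Subsystem Call */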
void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    bql_lock();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    bql_unlock();
}
#endif

#ifndef CONFIG_USER_ONLY
static G_NORETURN void per_raise_exception(CPUS390XState *env)
{
    trigger_pgm_exception(env, PGM_PER);
    cpu_loop_exit(env_cpu(env));
}

static G_NORETURN void per_raise_exception_log(CPUS390XState *env)
{
    qemu_log_mask(CPU_LOG_INT, "PER interrupt after 0x%" PRIx64 "\n",
                  env->per_address);
    per_raise_exception(env);
}

void HELPER(per_check_exception)(CPUS390XState *env)
{
    /* psw_addr, per_address and int_pgm_ilen are already set. */
    if (unlikely(env->per_perc_atmid)) {
        per_raise_exception_log(env);
    }
}

/* Check if an address is between the PER starting address and the PER
   ending address.  The address range may wrap around.  */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

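/* Trigger a PER successful-branching event */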
void HELPER(per_branch)(CPUS390XState *env, uint64_t dest, uint32_t ilen)
{
    if ((env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
        && !get_per_in_range(env, dest)) {
        return;
    }

    env->psw.addr = dest;
    env->int_pgm_ilen = ilen;
    env->per_address = env->gbea;
    env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
    per_raise_exception_log(env);
}

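/* Trigger a PER instruction-fetch event */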
void HELPER(per_ifetch)(CPUS390XState *env, uint32_t ilen)
{
    if (get_per_in_range(env, env->psw.addr)) {
        env->per_address = env->psw.addr;
        env->int_pgm_ilen = ilen;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_IFETCH_NULLIFICATION) {
            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            qemu_log_mask(CPU_LOG_INT, "PER interrupt before 0x%" PRIx64 "\n",
                          env->per_address);
            per_raise_exception(env);
        }
    }
}

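/* Trigger a PER store-using-real-address event */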
void HELPER(per_store_real)(CPUS390XState *env, uint32_t ilen)
{
    /* PSW is saved just before calling the helper.  */
    env->per_address = env->psw.addr;
    env->int_pgm_ilen = ilen;
    env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    per_raise_exception_log(env);
}
#endif

static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

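/* Lazily initialize the facility list bytes reported by STFL and STFLE */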
static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* Racy, but we don't care: the same values are always written. */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
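/* Store Facility List */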
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

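/* Store Facility List Extended */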
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    if (addr & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored.  However, existing hardware appears
     * not to store those doublewords, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}

#ifndef CONFIG_USER_ONLY
/*
 * Note: we ignore any return code of the functions called for the pci
 * instructions, as the only time they return !0 is when the stub is
 * called, and in that case we didn't even offer the zpci facility.
 * The only exception is SIC, where program checks need to be handled
 * by the caller.
 */
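/* Call Logical Processor */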
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    clp_service_call(cpu, r2, GETPC());
    bql_unlock();
}

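/* PCI Load */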
void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    pcilg_service_call(cpu, r1, r2, GETPC());
    bql_unlock();
}

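/* PCI Store */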
void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    pcistg_service_call(cpu, r1, r2, GETPC());
    bql_unlock();
}

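/* Store PCI Function Controls */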
void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                     uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    bql_unlock();
}

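/* Set Interruption Controls */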
void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    S390CPU *cpu = env_archcpu(env);
    int r;

    bql_lock();
    r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff);
    bql_unlock();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
}

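/* Refresh PCI Translations */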
void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    rpcit_service_call(cpu, r1, r2, GETPC());
    bql_unlock();
}

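/* PCI Store Block */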
void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
                    uint64_t gaddr, uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
    bql_unlock();
}

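/* Modify PCI Function Controls */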
void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                    uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    bql_lock();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    bql_unlock();
}
#endif