xref: /openbmc/qemu/target/s390x/tcg/misc_helper.c (revision 44602af8)
/*
 *  S/390 misc helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB.  */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care, as it is up to the OS to decide when to process
     * that interrupt and reset the timer to a value > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}

#ifndef CONFIG_USER_ONLY
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    qemu_mutex_unlock_iothread();
    if (r < 0) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
    return r;
}

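/* DIAGNOSE */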
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3, GETPC());
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    const uint32_t prefix = a1 & 0x7fffe000;
    const uint32_t old_prefix = env->psa;
    CPUState *cs = env_cpu(env);

    if (prefix == old_prefix) {
        return;
    }
    /*
     * Since the prefix is aligned to 8k and memory increments are a multiple
     * of 8k, checking the first page is sufficient.
     */
    if (!mmu_absolute_addr_valid(prefix, true)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
    if (prefix != 0) {
        tlb_flush_page(cs, prefix);
        tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
    }
    if (old_prefix != 0) {
        tlb_flush_page(cs, old_prefix);
        tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
    }
}

static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(qemu_mutex_iothread_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* the tod has to exceed the ckc; this can never happen if ckc is all 1's */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, time);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    qemu_mutex_lock_iothread();
    update_ckc_timer(env);
    qemu_mutex_unlock_iothread();
}

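/* Re-arm the CKC timer on this CPU after the TOD clock was changed */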
void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    S390CPU *cpu = S390_CPU(cs);

    update_ckc_timer(&cpu->env);
}

/* Set Clock */
uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
{
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod = {
        .high = 0,
        .low = tod_low,
    };

    qemu_mutex_lock_iothread();
    tdc->set(td, &tod, &error_abort);
    qemu_mutex_unlock_iothread();
    return 0;
}

/* Set Tod Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

    if (val & 0xffff0000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    if (a0 & ~TARGET_PAGE_MASK) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have a sclp linemode console (which is always present on
     * LPAR, but not the default for QEMU), therefore not displaying boot
     * messages and making booting a Linux kernel under TCG harder.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     *       and change defaults (linemode console) based on machine type
     *       and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strpadcpy((char *)sysib.sysib_322.ext_names[0],
                          sizeof(sysib.sysib_322.ext_names[0]),
                          qemu_name, '\0');

            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}

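/* Signal Processor */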
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
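/* Cancel Subchannel */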
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

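/* Clear Subchannel */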
void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

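/* Halt Subchannel */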
void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

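/* Modify Subchannel */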
void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

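/* Reset Channel Path */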
void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

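/* Resume Subchannel */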
void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

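/* Set Address Limit */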
void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_sal(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

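/* Set Channel Monitor */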
void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

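/* Start Subchannel */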
void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

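/* Store Channel Report Word */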
void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

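/* Store Subchannel */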
void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

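/* Test Pending Interruption */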
uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

    if (addr & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    qemu_mutex_lock_iothread();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        qemu_mutex_unlock_iothread();
        return 0;
    }

    if (addr) {
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            qemu_mutex_unlock_iothread();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    qemu_mutex_unlock_iothread();
    return 1;
}

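/* Test Subchannel */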
void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

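/* Channel Subsystem Call */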
void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
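/* Raise a PER exception if a PER event was recorded */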
void HELPER(per_check_exception)(CPUS390XState *env)
{
    if (env->per_perc_atmid) {
        tcg_s390_program_interrupt(env, PGM_PER, GETPC());
    }
}

/* Check if an address is within the PER starting address and the PER
   ending address.  The address range might loop.  */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

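/* PER successful-branching event */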
void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

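/* PER instruction-fetching event */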
void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = env_cpu(env);

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}

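/* PER storage-alteration-using-real-address event */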
void HELPER(per_store_real)(CPUS390XState *env)
{
    if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
#endif

static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care: the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
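/* Store Facility List */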
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

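/* Store Facility List Extended */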
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    if (addr & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored.  However, existing hardware appears
     * not to store these doublewords, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}

#ifndef CONFIG_USER_ONLY
/*
 * Note: we ignore any return code of the functions called for the pci
 * instructions, as the only time they return !0 is when the stub is
 * called, and in that case we didn't even offer the zpci facility.
 * The only exception is SIC, where program checks need to be handled
 * by the caller.
 */
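/* Call Logical Processor */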
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    clp_service_call(cpu, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

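/* PCI Load */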
void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcilg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

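/* PCI Store */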
void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

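/* Store PCI Function Controls */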
void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                     uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

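/* Set Interruption Controls */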
void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    int r;

    qemu_mutex_lock_iothread();
    r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
    qemu_mutex_unlock_iothread();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
}

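/* Refresh PCI Translations */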
void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    rpcit_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

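/* PCI Store Block */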
void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
                    uint64_t gaddr, uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

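/* Modify PCI Function Controls */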
void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                    uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif
806