xref: /openbmc/qemu/target/s390x/kvm/kvm.c (revision 25e2cfbb)
/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "cpu.h"
#include "s390x-internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/device_tree.h"
#include "exec/gdbstub.h"
#include "exec/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "target/s390x/kvm/pv.h"

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_SET_CONTROL_PROGRAM_CODES  0x318
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40
#define ICPT_PV_INSTR                   0x68
#define ICPT_PV_INSTR_NOTIFICATION      0x6c

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and, in addition, NR_LOCAL_IRQS interrupts.
 */
#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                     (max_cpus + NR_LOCAL_IRQS))
/*
 * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
 * as the dirty bitmap must be managed by bitops that take an int as
 * their position indicator. This would end at an unaligned address
 * (0x7fffff00000). As future variants might provide larger pages
 * and to make all addresses properly aligned, let us split at 4TB.
 */
#define KVM_SLOT_MAX_BYTES (4UL * TiB)

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_mem_op_extension;
static int cap_s390_irq;
static int cap_ri;
static int cap_hpage_1m;
static int cap_vcpu_resets;
static int cap_protected;
static int cap_zpci_op;
static int cap_protected_dump;

static bool mem_op_storage_key_support;

static int active_cmma;

static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

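/*
 * Set the VM memory limit. If the requested limit exceeds what the
 * hardware supports, *hw_limit is set to the supported maximum and
 * -E2BIG is returned.
 */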
int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_cmma_active(void)
{
    return active_cmma;
}

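/* Cached check whether the kernel supports enabling and clearing CMMA. */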
static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (cap_hpage_1m) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with huge memory backings.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}

static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}

void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
{
    if (pagesize == 4 * KiB) {
        return;
    }

    if (!hpage_1m_allowed()) {
        error_setg(errp, "This QEMU machine does not support huge page "
                   "mappings");
        return;
    }

    if (pagesize != 1 * MiB) {
        error_setg(errp, "Memory backing with 2G pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) {
        error_setg(errp, "Memory backing with 1M pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    cap_hpage_1m = 1;
}

int kvm_s390_get_hpage_1m(void)
{
    return cap_hpage_1m;
}

static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

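/*
 * Probe the KVM capabilities we depend on, enable the optional ones we can
 * use, and constrain the maximum memslot size.
 */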
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
                         false, NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
        error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - "
                     "please use kernel 3.15 or newer");
        return -1;
    }
    if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
        error_report("KVM is missing capability KVM_CAP_S390_COW - "
                     "unsupported environment");
        return -1;
    }

    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
    mem_op_storage_key_support = cap_mem_op_extension > 0;
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
    cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
    cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
    cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP);
    cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP);

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (ri_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
            cap_ri = 1;
        }
    }
    if (cpu_model_allowed()) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary, we only try to enable this for
     * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
     */
    if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
        kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
    }

    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    g_free(cpu->irqstate);
    cpu->irqstate = NULL;

    return 0;
}

static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
{
    CPUState *cs = CPU(cpu);

    /*
     * The reset call is needed here to reset in-kernel vcpu data that
     * we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG).  Before this ioctl
     * cpu_synchronize_state() is called in common kvm code
     * (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, type)) {
        error_report("CPU reset failed on CPU %i type %lx",
                     cs->cpu_index, type);
    }
}

void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
{
    kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
}

void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
    } else {
        kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
    }
}

void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
    }
}

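/*
 * Check whether all register sets in @regs can be accessed through the
 * kvm_run sync_regs interface on this kernel.
 */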
static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

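/*
 * Copy the QEMU CPU state into the kernel. Registers covered by sync_regs
 * are written through kvm_run and marked dirty; everything else falls back
 * to the KVM_SET_* ioctls and the ONE_REG interface.
 */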
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRs */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = *get_freg(env, i);
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        cs->kvm_run->s.regs.diag318 = env->diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

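/*
 * Mirror image of kvm_arch_put_registers(): fetch the CPU state from the
 * kernel, preferring the sync_regs interface and falling back to the
 * KVM_GET_* ioctls and the ONE_REG interface where necessary.
 */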
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        env->diag318_info = cs->kvm_run->s.regs.diag318;
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low  = gtod.tod;

    return r;
}

int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

/**
 * kvm_s390_mem_op:
 * @addr:      the logical start address in guest memory
 * @ar:        the access register number
 * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
 * @len:       length that should be transferred
 * @is_write:  true = write, false = read
 * Returns:    0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
        .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }
    if (mem_op_storage_key_support) {
        mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
    }
    return ret;
}

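/*
 * Read or write the SIDA (secure instruction data area) of a protected VM
 * at the given offset. An access failure is fatal here, so we abort.
 */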
int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
                       int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .sida_offset = offset,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
                       : KVM_S390_MEMOP_SIDA_READ,
        .buf = (uint64_t)hostbuf,
    };
    int ret;

    if (!cap_mem_op || !cap_protected) {
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
        abort();
    }
    return ret;
}

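/*
 * Software breakpoint instruction patched into guest memory; chosen once
 * by determine_sw_breakpoint_instr().
 */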
static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        trace_kvm_sw_breakpoint(4);
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        trace_kvm_sw_breakpoint(2);
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}

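/*
 * Look up a hardware breakpoint by address, length and type; a length of
 * -1 matches any length.
 */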
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints = g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

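/*
 * Translate an extended kvm_s390_irq into the legacy kvm_s390_interrupt
 * format used by older kernels that lack KVM_CAP_S390_INJECT_IRQ.
 */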
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

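/*
 * Handle an intercepted SCLP service call: the SCCB address and command
 * code are read from the registers designated by the ipbh0 field.
 */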
static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                  uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    switch (run->s390_sieic.icptcode) {
    case ICPT_PV_INSTR_NOTIFICATION:
        g_assert(s390_is_pv());
        /* The notification intercepts are currently handled by KVM */
        error_report("unexpected SCLP PV notification");
        exit(1);
        break;
    case ICPT_PV_INSTR:
        g_assert(s390_is_pv());
        sclp_service_call_protected(env, sccb, code);
        /* Setting the CC is done by the Ultravisor. */
        break;
    case ICPT_INSTRUCTION:
        g_assert(!s390_is_pv());
        r = sclp_service_call(env, sccb, code);
        if (r < 0) {
            kvm_s390_program_interrupt(cpu, -r);
            return;
        }
        setcc(cpu, r);
    }
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return rc;
}

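/*
 * Decode the base register, index register and signed 20-bit displacement
 * of an RXY-format instruction from the intercept data and compute the
 * effective address. The access register number is returned in *ar.
 */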
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(env, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}

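/* DIAG 0x500: KVM virtio hypercall; invalid calls raise a specification
 * exception instead of being reflected to the caller. */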
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

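/*
 * The PSW points past the breakpoint instruction when we get here, so
 * rewind it and check whether a software breakpoint is registered there.
 */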
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
{
    CPUS390XState *env = &S390_CPU(cs)->env;

    /* Feat bit is set only if KVM supports sync for diag318 */
    if (s390_has_feat(S390_FEAT_DIAG_318)) {
        env->diag318_info = diag318_info;
        cs->kvm_run->s.regs.diag318 = diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
        /*
         * diag 318 info is zeroed during a clear reset and
         * diag 308 IPL subcodes.
         */
    }
}

static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t diag318_info = run->s.regs.gprs[reg];
    CPUState *t;

    /*
     * DIAG 318 can only be enabled with KVM support. As such, let's
     * ensure a guest cannot execute this instruction erroneously.
     */
    if (!s390_has_feat(S390_FEAT_DIAG_318)) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return;
    }

    CPU_FOREACH(t) {
        run_on_cpu(t, s390_do_cpu_set_diag318,
                   RUN_ON_CPU_HOST_ULONG(diag318_info));
    }
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_SET_CONTROL_PROGRAM_CODES:
        handle_diag_318(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        trace_kvm_insn_diag(func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    trace_kvm_insn(run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

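/* Halt the vcpu and report a guest panic for intercepts we cannot handle. */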
static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}

/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask  = run->psw_mask;
    oldpsw.addr  = run->psw_addr;
1734     /*
1735      * Avoid endless loops of operation exceptions if the pgm new PSW
1736      * will cause a new operation exception. The heuristic checks
1737      * whether the pgm new PSW is within 6 bytes before the faulting
1738      * PSW address (with the same DAT and AS settings), the new PSW is
1739      * not a wait PSW, and the fault was not triggered from problem
1740      * state. In that case, go into the crashed state.
1741      */
1742 
1743     if (oldpsw.addr - newpsw.addr <= 6 &&
1744         !(newpsw.mask & PSW_MASK_WAIT) &&
1745         !(oldpsw.mask & PSW_MASK_PSTATE) &&
1746         (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
1747         (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
1748         unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
1749                                offsetof(LowCore, program_new_psw));
1750         return EXCP_HALTED;
1751     }
1752     return 0;
1753 }
1754 
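/*
 * Central dispatcher for SIE intercepts (KVM_EXIT_S390_SIEIC): returns 0
 * to keep the vcpu running, or an EXCP_* code such as EXCP_HALTED if it
 * has to stay stopped.
 */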
1755 static int handle_intercept(S390CPU *cpu)
1756 {
1757     CPUState *cs = CPU(cpu);
1758     struct kvm_run *run = cs->kvm_run;
1759     int icpt_code = run->s390_sieic.icptcode;
1760     int r = 0;
1761 
1762     trace_kvm_intercept(icpt_code, (long)run->psw_addr);
1763     switch (icpt_code) {
1764     case ICPT_INSTRUCTION:
1765     case ICPT_PV_INSTR:
1766     case ICPT_PV_INSTR_NOTIFICATION:
1767         r = handle_instruction(cpu, run);
1768         break;
1769     case ICPT_PROGRAM:
1770         unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
1771                                offsetof(LowCore, program_new_psw));
1772         r = EXCP_HALTED;
1773         break;
1774     case ICPT_EXT_INT:
1775         unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
1776                                offsetof(LowCore, external_new_psw));
1777         r = EXCP_HALTED;
1778         break;
1779     case ICPT_WAITPSW:
1780         /* disabled wait, since enabled wait is handled in kernel */
1781         s390_handle_wait(cpu);
1782         r = EXCP_HALTED;
1783         break;
1784     case ICPT_CPU_STOP:
1785         do_stop_interrupt(&cpu->env);
1786         r = EXCP_HALTED;
1787         break;
1788     case ICPT_OPEREXC:
1789         /* check for breakpoints */
1790         r = handle_sw_breakpoint(cpu, run);
1791         if (r == -ENOENT) {
1792             /* Then check for potential pgm check loops */
1793             r = handle_oper_loop(cpu, run);
1794             if (r == 0) {
1795                 kvm_s390_program_interrupt(cpu, PGM_OPERATION);
1796             }
1797         }
1798         break;
1799     case ICPT_SOFT_INTERCEPT:
1800         fprintf(stderr, "KVM unimplemented icpt SOFT\n");
1801         exit(1);
1802         break;
1803     case ICPT_IO:
1804         fprintf(stderr, "KVM unimplemented icpt IO\n");
1805         exit(1);
1806         break;
1807     default:
1808         fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
1809         exit(1);
1810         break;
1811     }
1812 
1813     return r;
1814 }
1815 
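/*
 * Complete a TEST SUBCHANNEL instruction that targets one of our
 * emulated subchannels; on failure, a dequeued I/O interrupt must not
 * get lost.
 */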
1816 static int handle_tsch(S390CPU *cpu)
1817 {
1818     CPUState *cs = CPU(cpu);
1819     struct kvm_run *run = cs->kvm_run;
1820     int ret;
1821 
1822     ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
1823                              RA_IGNORED);
1824     if (ret < 0) {
1825         /*
1826          * Failure.
1827          * If an I/O interrupt had been dequeued, we have to reinject it.
1828          */
1829         if (run->s390_tsch.dequeued) {
1830             s390_io_interrupt(run->s390_tsch.subchannel_id,
1831                               run->s390_tsch.subchannel_nr,
1832                               run->s390_tsch.io_int_parm,
1833                               run->s390_tsch.io_int_word);
1834         }
1835         ret = 0;
1836     }
1837     return ret;
1838 }
1839 
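/*
 * Fix up the level-3 (VM) SYSIB returned for STSI 3.2.2: insert our CPU
 * counts, machine name, Extended Name and UUID for the first
 * virtualization level, using the protected-virtualization aware memory
 * accessors where necessary.
 */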
1840 static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
1841 {
1842     const MachineState *ms = MACHINE(qdev_get_machine());
1843     uint16_t conf_cpus = 0, reserved_cpus = 0;
1844     SysIB_322 sysib;
1845     int del, i;
1846 
1847     if (s390_is_pv()) {
1848         s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
1849     } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
1850         return;
1851     }
1852     /* Shift the stack of Extended Names to prepare for our own data */
1853     memmove(&sysib.ext_names[1], &sysib.ext_names[0],
1854             sizeof(sysib.ext_names[0]) * (sysib.count - 1));
1855     /* The first virtualization level that doesn't provide Extended Names
1856      * delimits the stack; assume it cannot manage them for lower levels.
1857      */
1858     for (del = 1; del < sysib.count; del++) {
1859         if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
1860             break;
1861         }
1862     }
1863     if (del < sysib.count) {
1864         memset(sysib.ext_names[del], 0,
1865                sizeof(sysib.ext_names[0]) * (sysib.count - del));
1866     }
1867 
1868     /* count the cpus and split them into configured and reserved ones */
1869     for (i = 0; i < ms->possible_cpus->len; i++) {
1870         if (ms->possible_cpus->cpus[i].cpu) {
1871             conf_cpus++;
1872         } else {
1873             reserved_cpus++;
1874         }
1875     }
1876     sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
1877     sysib.vm[0].conf_cpus = conf_cpus;
1878     sysib.vm[0].reserved_cpus = reserved_cpus;
1879 
1880     /* Insert short machine name in EBCDIC, padded with blanks */
1881     if (qemu_name) {
1882         memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
1883         ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
1884                                                     strlen(qemu_name)));
1885     }
1886     sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
1887     /* If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
1888      * SYSIB, s390 considers it incapable of providing any Extended Name.
1889      * Therefore, if no name was specified on the qemu invocation, use the
1890      * same "KVMguest" default that KVM has filled into the short name
1891      * field. */
1892     strpadcpy((char *)sysib.ext_names[0],
1893               sizeof(sysib.ext_names[0]),
1894               qemu_name ?: "KVMguest", '\0');
1895 
1896     /* Insert UUID */
1897     memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));
1898 
1899     if (s390_is_pv()) {
1900         s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
1901     } else {
1902         s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
1903     }
1904 }
1905 
1906 static int handle_stsi(S390CPU *cpu)
1907 {
1908     CPUState *cs = CPU(cpu);
1909     struct kvm_run *run = cs->kvm_run;
1910 
1911     switch (run->s390_stsi.fc) {
1912     case 3:
1913         if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
1914             return 0;
1915         }
1916         /* Only sysib 3.2.2 needs post-handling for now. */
1917         insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
1918         return 0;
1919     default:
1920         return 0;
1921     }
1922 }
1923 
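/*
 * Translate a KVM debug exit into a QEMU debug action: only hardware
 * watchpoints/breakpoints we registered and requested single-steps are
 * reported as EXCP_DEBUG.
 */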
1924 static int kvm_arch_handle_debug_exit(S390CPU *cpu)
1925 {
1926     CPUState *cs = CPU(cpu);
1927     struct kvm_run *run = cs->kvm_run;
1928 
1929     int ret = 0;
1930     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1931 
1932     switch (arch_info->type) {
1933     case KVM_HW_WP_WRITE:
1934         if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1935             cs->watchpoint_hit = &hw_watchpoint;
1936             hw_watchpoint.vaddr = arch_info->addr;
1937             hw_watchpoint.flags = BP_MEM_WRITE;
1938             ret = EXCP_DEBUG;
1939         }
1940         break;
1941     case KVM_HW_BP:
1942         if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1943             ret = EXCP_DEBUG;
1944         }
1945         break;
1946     case KVM_SINGLESTEP:
1947         if (cs->singlestep_enabled) {
1948             ret = EXCP_DEBUG;
1949         }
1950         break;
1951     default:
1952         ret = -ENOSYS;
1953     }
1954 
1955     return ret;
1956 }
1957 
1958 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1959 {
1960     S390CPU *cpu = S390_CPU(cs);
1961     int ret = 0;
1962 
1963     qemu_mutex_lock_iothread();
1964 
1965     kvm_cpu_synchronize_state(cs);
1966 
1967     switch (run->exit_reason) {
1968     case KVM_EXIT_S390_SIEIC:
1969         ret = handle_intercept(cpu);
1970         break;
1971     case KVM_EXIT_S390_RESET:
1972         s390_ipl_reset_request(cs, S390_RESET_REIPL);
1973         break;
1974     case KVM_EXIT_S390_TSCH:
1975         ret = handle_tsch(cpu);
1976         break;
1977     case KVM_EXIT_S390_STSI:
1978         ret = handle_stsi(cpu);
1979         break;
1980     case KVM_EXIT_DEBUG:
1981         ret = kvm_arch_handle_debug_exit(cpu);
1982         break;
1983     default:
1984         fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
1985         break;
1986     }
1987     qemu_mutex_unlock_iothread();
1988 
1989     if (ret == 0) {
1990         ret = EXCP_INTERRUPT;
1991     }
1992     return ret;
1993 }
1994 
1995 bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
1996 {
1997     return true;
1998 }
1999 
2000 void kvm_s390_enable_css_support(S390CPU *cpu)
2001 {
2002     int r;
2003 
2004     /* Activate host kernel channel subsystem support. */
2005     r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
2006     assert(r == 0);
2007 }
2008 
2009 void kvm_arch_init_irq_routing(KVMState *s)
2010 {
2011     /*
2012      * Note that while irqchip capabilities generally imply that cpustates
2013      * are handled in-kernel, it is not true for s390 (yet); therefore, we
2014      * have to override the common code kvm_halt_in_kernel_allowed setting.
2015      */
2016     if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
2017         kvm_gsi_routing_allowed = true;
2018         kvm_halt_in_kernel_allowed = false;
2019     }
2020 }
2021 
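/*
 * (De-)register an ioeventfd for a virtio-ccw queue notification: the
 * kernel matches on the subchannel id (addr) and virtqueue index
 * (datamatch), so guest notifications are handled without exiting to
 * userspace.
 */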
2022 int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
2023                                     int vq, bool assign)
2024 {
2025     struct kvm_ioeventfd kick = {
2026         .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
2027         KVM_IOEVENTFD_FLAG_DATAMATCH,
2028         .fd = event_notifier_get_fd(notifier),
2029         .datamatch = vq,
2030         .addr = sch,
2031         .len = 8,
2032     };
2033     trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
2034                                      kick.datamatch);
2035     if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
2036         return -ENOSYS;
2037     }
2038     if (!assign) {
2039         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
2040     }
2041     return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
2042 }
2043 
2044 int kvm_s390_get_protected_dump(void)
2045 {
2046     return cap_protected_dump;
2047 }
2048 
2049 int kvm_s390_get_ri(void)
2050 {
2051     return cap_ri;
2052 }
2053 
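/*
 * Propagate a new multiprocessor state (stopped, check stop, operating,
 * load) for the given vcpu to the kernel.
 */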
2054 int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
2055 {
2056     struct kvm_mp_state mp_state = {};
2057     int ret;
2058 
2059     /* the kvm part might not have been initialized yet */
2060     if (CPU(cpu)->kvm_state == NULL) {
2061         return 0;
2062     }
2063 
2064     switch (cpu_state) {
2065     case S390_CPU_STATE_STOPPED:
2066         mp_state.mp_state = KVM_MP_STATE_STOPPED;
2067         break;
2068     case S390_CPU_STATE_CHECK_STOP:
2069         mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
2070         break;
2071     case S390_CPU_STATE_OPERATING:
2072         mp_state.mp_state = KVM_MP_STATE_OPERATING;
2073         break;
2074     case S390_CPU_STATE_LOAD:
2075         mp_state.mp_state = KVM_MP_STATE_LOAD;
2076         break;
2077     default:
2078         error_report("Requested CPU state is not a valid S390 CPU state: %u",
2079                      cpu_state);
2080         exit(1);
2081     }
2082 
2083     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
2084     if (ret) {
2085         trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
2086                                        strerror(-ret));
2087     }
2088 
2089     return ret;
2090 }
2091 
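/*
 * Save the vcpu's pending interrupt state into cpu->irqstate, so it can
 * be transferred during migration.
 */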
2092 void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
2093 {
2094     unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
2095     struct kvm_s390_irq_state irq_state = {
2096         .buf = (uint64_t) cpu->irqstate,
2097         .len = VCPU_IRQ_BUF_SIZE(max_cpus),
2098     };
2099     CPUState *cs = CPU(cpu);
2100     int32_t bytes;
2101 
2102     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
2103         return;
2104     }
2105 
2106     bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
2107     if (bytes < 0) {
2108         cpu->irqstate_saved_size = 0;
2109         error_report("Migration of interrupt state failed");
2110         return;
2111     }
2112 
2113     cpu->irqstate_saved_size = bytes;
2114 }
2115 
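/*
 * Restore the vcpu's interrupt state that was saved on the migration
 * source; nothing to do if no state was transferred.
 */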
2116 int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
2117 {
2118     CPUState *cs = CPU(cpu);
2119     struct kvm_s390_irq_state irq_state = {
2120         .buf = (uint64_t) cpu->irqstate,
2121         .len = cpu->irqstate_saved_size,
2122     };
2123     int r;
2124 
2125     if (cpu->irqstate_saved_size == 0) {
2126         return 0;
2127     }
2128 
2129     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
2130         return -ENOSYS;
2131     }
2132 
2133     r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
2134     if (r) {
2135         error_report("Setting interrupt state failed %d", r);
2136     }
2137     return r;
2138 }
2139 
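/*
 * Translate a PCI MSI route into the s390 adapter-interrupt routing
 * format, based on the summary and indicator addresses registered for
 * the zPCI device.
 */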
2140 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2141                              uint64_t address, uint32_t data, PCIDevice *dev)
2142 {
2143     S390PCIBusDevice *pbdev;
2144     uint32_t vec = data & ZPCI_MSI_VEC_MASK;
2145 
2146     if (!dev) {
2147         trace_kvm_msi_route_fixup("no pci device");
2148         return -ENODEV;
2149     }
2150 
2151     pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
2152     if (!pbdev) {
2153         trace_kvm_msi_route_fixup("no zpci device");
2154         return -ENODEV;
2155     }
2156 
2157     route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
2158     route->flags = 0;
2159     route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
2160     route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
2161     route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
2162     route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
2163     route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
2164     return 0;
2165 }
2166 
2167 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
2168                                 int vector, PCIDevice *dev)
2169 {
2170     return 0;
2171 }
2172 
2173 int kvm_arch_release_virq_post(int virq)
2174 {
2175     return 0;
2176 }
2177 
2178 int kvm_arch_msi_data_to_gsi(uint32_t data)
2179 {
2180     abort();
2181 }
2182 
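/*
 * Query the installed subfunction blocks (PLO, PTFF, the MSA crypto
 * functions, ...) of the machine and add them to the feature bitmap.
 */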
2183 static int query_cpu_subfunc(S390FeatBitmap features)
2184 {
2185     struct kvm_s390_vm_cpu_subfunc prop = {};
2186     struct kvm_device_attr attr = {
2187         .group = KVM_S390_VM_CPU_MODEL,
2188         .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
2189         .addr = (uint64_t) &prop,
2190     };
2191     int rc;
2192 
2193     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2194     if (rc) {
2195         return rc;
2196     }
2197 
2198     /*
2199      * Add all subfunctions now, provided the corresponding feature that
2200      * unlocks their query functions is available.
2201      */
2202     s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
2203     if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
2204         s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
2205     }
2206     if (test_bit(S390_FEAT_MSA, features)) {
2207         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
2208         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
2209         s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
2210         s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
2211         s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
2212     }
2213     if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
2214         s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
2215     }
2216     if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
2217         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
2218         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
2219         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
2220         s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
2221     }
2222     if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
2223         s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
2224     }
2225     if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
2226         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
2227     }
2228     if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
2229         s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
2230     }
2231     if (test_bit(S390_FEAT_ESORT_BASE, features)) {
2232         s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
2233     }
2234     if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
2235         s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
2236     }
2237     return 0;
2238 }
2239 
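/*
 * Counterpart of query_cpu_subfunc(): configure the subfunction blocks
 * the guest is allowed to use.
 */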
2240 static int configure_cpu_subfunc(const S390FeatBitmap features)
2241 {
2242     struct kvm_s390_vm_cpu_subfunc prop = {};
2243     struct kvm_device_attr attr = {
2244         .group = KVM_S390_VM_CPU_MODEL,
2245         .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
2246         .addr = (uint64_t) &prop,
2247     };
2248 
2249     if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2250                            KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
2251         /* hardware support might be missing; IBC will handle most of this */
2252         return 0;
2253     }
2254 
2255     s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
2256     if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
2257         s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
2258     }
2259     if (test_bit(S390_FEAT_MSA, features)) {
2260         s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
2261         s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
2262         s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
2263         s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
2264         s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
2265     }
2266     if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
2267         s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
2268     }
2269     if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
2270         s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
2271         s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
2272         s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
2273         s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
2274     }
2275     if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
2276         s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
2277     }
2278     if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
2279         s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
2280     }
2281     if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
2282         s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
2283     }
2284     if (test_bit(S390_FEAT_ESORT_BASE, features)) {
2285         s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
2286     }
2287     if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
2288         s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
2289     }
2290     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2291 }
2292 
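/* Mapping of KVM cpu model feature bits to QEMU S390Feat bits. */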
2293 static int kvm_to_feat[][2] = {
2294     { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
2295     { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
2296     { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
2297     { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
2298     { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
2299     { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
2300     { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
2301     { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
2302     { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
2303     { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
2304     { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
2305     { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
2306     { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
2307     { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
2308 };
2309 
2310 static int query_cpu_feat(S390FeatBitmap features)
2311 {
2312     struct kvm_s390_vm_cpu_feat prop = {};
2313     struct kvm_device_attr attr = {
2314         .group = KVM_S390_VM_CPU_MODEL,
2315         .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
2316         .addr = (uint64_t) &prop,
2317     };
2318     int rc;
2319     int i;
2320 
2321     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2322     if (rc) {
2323         return rc;
2324     }
2325 
2326     for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
2327         if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
2328             set_bit(kvm_to_feat[i][1], features);
2329         }
2330     }
2331     return 0;
2332 }
2333 
2334 static int configure_cpu_feat(const S390FeatBitmap features)
2335 {
2336     struct kvm_s390_vm_cpu_feat prop = {};
2337     struct kvm_device_attr attr = {
2338         .group = KVM_S390_VM_CPU_MODEL,
2339         .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
2340         .addr = (uint64_t) &prop,
2341     };
2342     int i;
2343 
2344     for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
2345         if (test_bit(kvm_to_feat[i][1], features)) {
2346             set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
2347         }
2348     }
2349     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2350 }
2351 
2352 bool kvm_s390_cpu_models_supported(void)
2353 {
2354     if (!cpu_model_allowed()) {
2355         /* compatibility machines interfere with the cpu model */
2356         return false;
2357     }
2358     return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2359                              KVM_S390_VM_CPU_MACHINE) &&
2360            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2361                              KVM_S390_VM_CPU_PROCESSOR) &&
2362            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2363                              KVM_S390_VM_CPU_MACHINE_FEAT) &&
2364            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2365                              KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
2366            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2367                              KVM_S390_VM_CPU_MACHINE_SUBFUNC);
2368 }
2369 
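/*
 * Construct the host CPU model: query cpuid, IBC and the facility mask
 * from KVM, merge in CPU features and subfunctions, apply QEMU-side
 * fixups and match the result against the known CPU definitions.
 */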
2370 void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
2371 {
2372     struct kvm_s390_vm_cpu_machine prop = {};
2373     struct kvm_device_attr attr = {
2374         .group = KVM_S390_VM_CPU_MODEL,
2375         .attr = KVM_S390_VM_CPU_MACHINE,
2376         .addr = (uint64_t) &prop,
2377     };
2378     uint16_t unblocked_ibc = 0, cpu_type = 0;
2379     int rc;
2380 
2381     memset(model, 0, sizeof(*model));
2382 
2383     if (!kvm_s390_cpu_models_supported()) {
2384         error_setg(errp, "KVM doesn't support CPU models");
2385         return;
2386     }
2387 
2388     /* query the basic cpu model properties */
2389     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2390     if (rc) {
2391         error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
2392         return;
2393     }
2394 
2395     cpu_type = cpuid_type(prop.cpuid);
2396     if (has_ibc(prop.ibc)) {
2397         model->lowest_ibc = lowest_ibc(prop.ibc);
2398         unblocked_ibc = unblocked_ibc(prop.ibc);
2399     }
2400     model->cpu_id = cpuid_id(prop.cpuid);
2401     model->cpu_id_format = cpuid_format(prop.cpuid);
2402     model->cpu_ver = 0xff;
2403 
2404     /* get supported cpu features indicated via STFL(E) */
2405     s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
2406                              (uint8_t *) prop.fac_mask);
2407     /* dat-enhancement facility 2 has no bit but was introduced with stfle */
2408     if (test_bit(S390_FEAT_STFLE, model->features)) {
2409         set_bit(S390_FEAT_DAT_ENH_2, model->features);
2410     }
2411     /* get supported cpu features indicated e.g. via SCLP */
2412     rc = query_cpu_feat(model->features);
2413     if (rc) {
2414         error_setg(errp, "KVM: Error querying CPU features: %d", rc);
2415         return;
2416     }
2417     /* get supported cpu subfunctions indicated via query / test bit */
2418     rc = query_cpu_subfunc(model->features);
2419     if (rc) {
2420         error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
2421         return;
2422     }
2423 
2424     /* PTFF subfunctions might be indicated although kernel support is missing */
2425     if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
2426         clear_bit(S390_FEAT_PTFF_QSIE, model->features);
2427         clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
2428         clear_bit(S390_FEAT_PTFF_STOE, model->features);
2429         clear_bit(S390_FEAT_PTFF_STOUE, model->features);
2430     }
2431 
2432     /* with cpu model support, CMM is only indicated if really available */
2433     if (kvm_s390_cmma_available()) {
2434         set_bit(S390_FEAT_CMM, model->features);
2435     } else {
2436         /* no cmm -> no cmm nt */
2437         clear_bit(S390_FEAT_CMM_NT, model->features);
2438     }
2439 
2440     /* bpb needs kernel support for migration, VSIE and reset */
2441     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
2442         clear_bit(S390_FEAT_BPB, model->features);
2443     }
2444 
2445     /*
2446      * If we have support for protected virtualization, indicate
2447      * the protected virtualization IPL unpack facility.
2448      */
2449     if (cap_protected) {
2450         set_bit(S390_FEAT_UNPACK, model->features);
2451     }
2452 
2453     /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
2454     set_bit(S390_FEAT_ZPCI, model->features);
2455     set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);
2456 
2457     if (s390_known_cpu_type(cpu_type)) {
2458         /* we want the exact model, even if some features are missing */
2459         model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
2460                                        ibc_ec_ga(unblocked_ibc), NULL);
2461     } else {
2462         /* model unknown, e.g. too new - search using features */
2463         model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
2464                                        ibc_ec_ga(unblocked_ibc),
2465                                        model->features);
2466     }
2467     if (!model->def) {
2468         error_setg(errp, "KVM: host CPU model could not be identified");
2469         return;
2470     }
2471     /* for now, we can only provide the AP feature with HW support */
2472     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
2473         KVM_S390_VM_CRYPTO_ENABLE_APIE)) {
2474         set_bit(S390_FEAT_AP, model->features);
2475     }
2476 
2477     /*
2478      * Extended-Length SCCB is handled entirely within QEMU.
2479      * For PV guests this is completely fenced by the Ultravisor, as Service
2480      * Call error checking and STFLE interpretation are handled via SIE.
2481      */
2482     set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);
2483 
2484     if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
2485         set_bit(S390_FEAT_DIAG_318, model->features);
2486     }
2487 
2488     /* strip off features that are not part of the maximum model */
2489     bitmap_and(model->features, model->features, model->def->full_feat,
2490                S390_FEAT_MAX);
2491 }
2492 
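/* Enable or disable interpretive execution of AP instructions (APIE). */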
2493 static void kvm_s390_configure_apie(bool interpret)
2494 {
2495     uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
2496                                 KVM_S390_VM_CRYPTO_DISABLE_APIE;
2497 
2498     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
2499         kvm_s390_set_attr(attr);
2500     }
2501 }
2502 
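/*
 * Push the configured CPU model into KVM: cpuid, IBC and the STFL(E)
 * facility list first, then CPU features and subfunctions; finally
 * enable CMM via CMMA and AP interpretation if the model includes them.
 */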
2503 void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
2504 {
2505     struct kvm_s390_vm_cpu_processor prop = {
2506         .fac_list = { 0 },
2507     };
2508     struct kvm_device_attr attr = {
2509         .group = KVM_S390_VM_CPU_MODEL,
2510         .attr = KVM_S390_VM_CPU_PROCESSOR,
2511         .addr = (uint64_t) &prop,
2512     };
2513     int rc;
2514 
2515     if (!model) {
2516         /* compatibility handling if cpu models are disabled */
2517         if (kvm_s390_cmma_available()) {
2518             kvm_s390_enable_cmma();
2519         }
2520         return;
2521     }
2522     if (!kvm_s390_cpu_models_supported()) {
2523         error_setg(errp, "KVM doesn't support CPU models");
2524         return;
2525     }
2526     prop.cpuid = s390_cpuid_from_cpu_model(model);
2527     prop.ibc = s390_ibc_from_cpu_model(model);
2528     /* configure cpu features indicated via STFL(E) */
2529     s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
2530                          (uint8_t *) prop.fac_list);
2531     rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2532     if (rc) {
2533         error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
2534         return;
2535     }
2536     /* configure cpu features indicated e.g. via SCLP */
2537     rc = configure_cpu_feat(model->features);
2538     if (rc) {
2539         error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
2540         return;
2541     }
2542     /* configure cpu subfunctions indicated via query / test bit */
2543     rc = configure_cpu_subfunc(model->features);
2544     if (rc) {
2545         error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
2546         return;
2547     }
2548     /* enable CMM via CMMA */
2549     if (test_bit(S390_FEAT_CMM, model->features)) {
2550         kvm_s390_enable_cmma();
2551     }
2552 
2553     if (test_bit(S390_FEAT_AP, model->features)) {
2554         kvm_s390_configure_apie(true);
2555     }
2556 }
2557 
2558 void kvm_s390_restart_interrupt(S390CPU *cpu)
2559 {
2560     struct kvm_s390_irq irq = {
2561         .type = KVM_S390_RESTART,
2562     };
2563 
2564     kvm_s390_vcpu_interrupt(cpu, &irq);
2565 }
2566 
2567 void kvm_s390_stop_interrupt(S390CPU *cpu)
2568 {
2569     struct kvm_s390_irq irq = {
2570         .type = KVM_S390_SIGP_STOP,
2571     };
2572 
2573     kvm_s390_vcpu_interrupt(cpu, &irq);
2574 }
2575 
2576 bool kvm_arch_cpu_check_are_resettable(void)
2577 {
2578     return true;
2579 }
2580 
2581 int kvm_s390_get_zpci_op(void)
2582 {
2583     return cap_zpci_op;
2584 }
2585 
2586 void kvm_arch_accel_class_init(ObjectClass *oc)
2587 {
2588 }
2589