// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
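/*
 * Illustrative usage (a sketch, not code from this file): the read-only
 * (0444/S_IRUGO) parameters above are fixed at module load time, e.g.
 *
 *	modprobe kvm nested=1 hpage=1
 *
 * while the 0644 parameters (halt_poll_max_steal, use_gisa,
 * diag9c_forwarding_hz) can additionally be changed at runtime through
 * /sys/module/kvm/parameters/.
 */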
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
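/*
 * Worked example (a sketch, not code from this file): with the multi-epoch
 * facility (ECD_MEF), epdx extends the 64-bit epoch to a wider signed
 * value. If the TOD jumps forward by 2, -2 must be added to that value:
 * epoch += 0xfffffffffffffffe and epdx gets delta_idx = -1 added as the
 * sign extension. If the unsigned add to epoch wrapped around (epoch <
 * delta afterwards, i.e. a carry occurred), epdx is incremented once more
 * to propagate the carry.
 */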
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939
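/*
 * Note on the query convention (explanatory, paraphrasing the code below):
 * setting bit 0x100 in the PLO function code selects "test bit" mode, so
 * plo_test_bit(i) reports via the condition code whether PLO function i is
 * installed. The result is folded into the bitmask
 * kvm_s390_available_subfunc.plo with the usual MSB-first packing:
 *
 *	plo[i >> 3] |= 0x80 >> (i & 7);	// bit 0 is the MSB of byte 0
 *
 * The CPACF and __insn32_query() queries below store an equivalent
 * "installed subfunctions" bitmask directly into the parameter block.
 */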
static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}
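/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * values returned above are queried with the generic KVM_CHECK_EXTENSION
 * ioctl, e.g.
 *
 *	int r = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *	// r > 0: supported; here r is the maximum transfer size (65536)
 *
 * A return value of 0 means the capability is not available.
 */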
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
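/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * handler above backs the generic KVM_GET_DIRTY_LOG ioctl:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,			// memslot id to query
 *		.dirty_bitmap = bitmap,		// user buffer, 1 bit/page
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * On s390 the generic kvm_get_dirty_log() path also pulls dirty state out
 * of the gmap via kvm_arch_sync_dirty_log() above before the bitmap is
 * copied to userspace and cleared.
 */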
"(not available)" : "(success)"); 819 break; 820 case KVM_CAP_S390_GS: 821 r = -EINVAL; 822 mutex_lock(&kvm->lock); 823 if (kvm->created_vcpus) { 824 r = -EBUSY; 825 } else if (test_facility(133)) { 826 set_kvm_facility(kvm->arch.model.fac_mask, 133); 827 set_kvm_facility(kvm->arch.model.fac_list, 133); 828 r = 0; 829 } 830 mutex_unlock(&kvm->lock); 831 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", 832 r ? "(not available)" : "(success)"); 833 break; 834 case KVM_CAP_S390_HPAGE_1M: 835 mutex_lock(&kvm->lock); 836 if (kvm->created_vcpus) 837 r = -EBUSY; 838 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) 839 r = -EINVAL; 840 else { 841 r = 0; 842 mmap_write_lock(kvm->mm); 843 kvm->mm->context.allow_gmap_hpage_1m = 1; 844 mmap_write_unlock(kvm->mm); 845 /* 846 * We might have to create fake 4k page 847 * tables. To avoid that the hardware works on 848 * stale PGSTEs, we emulate these instructions. 849 */ 850 kvm->arch.use_skf = 0; 851 kvm->arch.use_pfmfi = 0; 852 } 853 mutex_unlock(&kvm->lock); 854 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s", 855 r ? "(not available)" : "(success)"); 856 break; 857 case KVM_CAP_S390_USER_STSI: 858 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); 859 kvm->arch.user_stsi = 1; 860 r = 0; 861 break; 862 case KVM_CAP_S390_USER_INSTR0: 863 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0"); 864 kvm->arch.user_instr0 = 1; 865 icpt_operexc_on_all_vcpus(kvm); 866 r = 0; 867 break; 868 case KVM_CAP_S390_CPU_TOPOLOGY: 869 r = -EINVAL; 870 mutex_lock(&kvm->lock); 871 if (kvm->created_vcpus) { 872 r = -EBUSY; 873 } else if (test_facility(11)) { 874 set_kvm_facility(kvm->arch.model.fac_mask, 11); 875 set_kvm_facility(kvm->arch.model.fac_list, 11); 876 r = 0; 877 } 878 mutex_unlock(&kvm->lock); 879 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s", 880 r ? "(not available)" : "(success)"); 881 break; 882 default: 883 r = -EINVAL; 884 break; 885 } 886 return r; 887 } 888 889 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) 890 { 891 int ret; 892 893 switch (attr->attr) { 894 case KVM_S390_VM_MEM_LIMIT_SIZE: 895 ret = 0; 896 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", 897 kvm->arch.mem_limit); 898 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) 899 ret = -EFAULT; 900 break; 901 default: 902 ret = -ENXIO; 903 break; 904 } 905 return ret; 906 } 907 908 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) 909 { 910 int ret; 911 unsigned int idx; 912 switch (attr->attr) { 913 case KVM_S390_VM_MEM_ENABLE_CMMA: 914 ret = -ENXIO; 915 if (!sclp.has_cmma) 916 break; 917 918 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); 919 mutex_lock(&kvm->lock); 920 if (kvm->created_vcpus) 921 ret = -EBUSY; 922 else if (kvm->mm->context.allow_gmap_hpage_1m) 923 ret = -EINVAL; 924 else { 925 kvm->arch.use_cmma = 1; 926 /* Not compatible with cmma. 
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}
/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
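/*
 * Illustrative userspace usage (sketch): migration mode is toggled through
 * the KVM_SET_DEVICE_ATTR ioctl on the VM fd:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * KVM_S390_VM_MIGRATION_STATUS can be read back the same way via
 * KVM_GET_DEVICE_ATTR with attr.addr pointing to a u64.
 */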
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so
	 * trying to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
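/*
 * Illustrative userspace usage (sketch): the guest TOD is accessed via the
 * KVM_S390_VM_TOD attribute group, e.g. reading the extended format:
 *
 *	struct kvm_s390_vm_tod_clock gtod;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_EXT,
 *		.addr  = (__u64)(unsigned long)&gtod,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * Setting the TOD of a protected guest fails with -EOPNOTSUPP, since the
 * clock is then managed by the ultravisor.
 */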
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}
((unsigned long *) &kvm_s390_available_subfunc.kimd)[0], 1738 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]); 1739 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx", 1740 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0], 1741 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]); 1742 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx", 1743 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0], 1744 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]); 1745 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx", 1746 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0], 1747 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]); 1748 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx", 1749 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0], 1750 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]); 1751 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx", 1752 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0], 1753 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]); 1754 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx", 1755 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0], 1756 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]); 1757 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx", 1758 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0], 1759 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]); 1760 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx", 1761 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0], 1762 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]); 1763 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx", 1764 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0], 1765 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]); 1766 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", 1767 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0], 1768 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1], 1769 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2], 1770 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]); 1771 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", 1772 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0], 1773 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1], 1774 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2], 1775 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]); 1776 1777 return 0; 1778 } 1779 1780 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) 1781 { 1782 int ret = -ENXIO; 1783 1784 switch (attr->attr) { 1785 case KVM_S390_VM_CPU_PROCESSOR: 1786 ret = kvm_s390_get_processor(kvm, attr); 1787 break; 1788 case KVM_S390_VM_CPU_MACHINE: 1789 ret = kvm_s390_get_machine(kvm, attr); 1790 break; 1791 case KVM_S390_VM_CPU_PROCESSOR_FEAT: 1792 ret = kvm_s390_get_processor_feat(kvm, attr); 1793 break; 1794 case KVM_S390_VM_CPU_MACHINE_FEAT: 1795 ret = kvm_s390_get_machine_feat(kvm, attr); 1796 break; 1797 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC: 1798 ret = kvm_s390_get_processor_subfunc(kvm, attr); 1799 break; 1800 case KVM_S390_VM_CPU_MACHINE_SUBFUNC: 1801 ret = kvm_s390_get_machine_subfunc(kvm, attr); 1802 break; 1803 } 1804 return ret; 1805 } 1806 1807 /** 1808 * kvm_s390_update_topology_change_report - update CPU topology change report 1809 * @kvm: guest KVM description 1810 * @val: set or clear the MTCR bit 1811 * 1812 * Updates the 
Multiprocessor Topology-Change-Report bit to signal 1813 * the guest with a topology change. 1814 * This is only relevant if the topology facility is present. 1815 * 1816 * The SCA version, bsca or esca, doesn't matter as offset is the same. 1817 */ 1818 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val) 1819 { 1820 union sca_utility new, old; 1821 struct bsca_block *sca; 1822 1823 read_lock(&kvm->arch.sca_lock); 1824 sca = kvm->arch.sca; 1825 do { 1826 old = READ_ONCE(sca->utility); 1827 new = old; 1828 new.mtcr = val; 1829 } while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val); 1830 read_unlock(&kvm->arch.sca_lock); 1831 } 1832 1833 static int kvm_s390_set_topo_change_indication(struct kvm *kvm, 1834 struct kvm_device_attr *attr) 1835 { 1836 if (!test_kvm_facility(kvm, 11)) 1837 return -ENXIO; 1838 1839 kvm_s390_update_topology_change_report(kvm, !!attr->attr); 1840 return 0; 1841 } 1842 1843 static int kvm_s390_get_topo_change_indication(struct kvm *kvm, 1844 struct kvm_device_attr *attr) 1845 { 1846 u8 topo; 1847 1848 if (!test_kvm_facility(kvm, 11)) 1849 return -ENXIO; 1850 1851 read_lock(&kvm->arch.sca_lock); 1852 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; 1853 read_unlock(&kvm->arch.sca_lock); 1854 1855 return put_user(topo, (u8 __user *)attr->addr); 1856 } 1857 1858 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) 1859 { 1860 int ret; 1861 1862 switch (attr->group) { 1863 case KVM_S390_VM_MEM_CTRL: 1864 ret = kvm_s390_set_mem_control(kvm, attr); 1865 break; 1866 case KVM_S390_VM_TOD: 1867 ret = kvm_s390_set_tod(kvm, attr); 1868 break; 1869 case KVM_S390_VM_CPU_MODEL: 1870 ret = kvm_s390_set_cpu_model(kvm, attr); 1871 break; 1872 case KVM_S390_VM_CRYPTO: 1873 ret = kvm_s390_vm_set_crypto(kvm, attr); 1874 break; 1875 case KVM_S390_VM_MIGRATION: 1876 ret = kvm_s390_vm_set_migration(kvm, attr); 1877 break; 1878 case KVM_S390_VM_CPU_TOPOLOGY: 1879 ret = kvm_s390_set_topo_change_indication(kvm, attr); 1880 break; 1881 default: 1882 ret = -ENXIO; 1883 break; 1884 } 1885 1886 return ret; 1887 } 1888 1889 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) 1890 { 1891 int ret; 1892 1893 switch (attr->group) { 1894 case KVM_S390_VM_MEM_CTRL: 1895 ret = kvm_s390_get_mem_control(kvm, attr); 1896 break; 1897 case KVM_S390_VM_TOD: 1898 ret = kvm_s390_get_tod(kvm, attr); 1899 break; 1900 case KVM_S390_VM_CPU_MODEL: 1901 ret = kvm_s390_get_cpu_model(kvm, attr); 1902 break; 1903 case KVM_S390_VM_MIGRATION: 1904 ret = kvm_s390_vm_get_migration(kvm, attr); 1905 break; 1906 case KVM_S390_VM_CPU_TOPOLOGY: 1907 ret = kvm_s390_get_topo_change_indication(kvm, attr); 1908 break; 1909 default: 1910 ret = -ENXIO; 1911 break; 1912 } 1913 1914 return ret; 1915 } 1916 1917 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) 1918 { 1919 int ret; 1920 1921 switch (attr->group) { 1922 case KVM_S390_VM_MEM_CTRL: 1923 switch (attr->attr) { 1924 case KVM_S390_VM_MEM_ENABLE_CMMA: 1925 case KVM_S390_VM_MEM_CLR_CMMA: 1926 ret = sclp.has_cmma ? 
0 : -ENXIO; 1927 break; 1928 case KVM_S390_VM_MEM_LIMIT_SIZE: 1929 ret = 0; 1930 break; 1931 default: 1932 ret = -ENXIO; 1933 break; 1934 } 1935 break; 1936 case KVM_S390_VM_TOD: 1937 switch (attr->attr) { 1938 case KVM_S390_VM_TOD_LOW: 1939 case KVM_S390_VM_TOD_HIGH: 1940 ret = 0; 1941 break; 1942 default: 1943 ret = -ENXIO; 1944 break; 1945 } 1946 break; 1947 case KVM_S390_VM_CPU_MODEL: 1948 switch (attr->attr) { 1949 case KVM_S390_VM_CPU_PROCESSOR: 1950 case KVM_S390_VM_CPU_MACHINE: 1951 case KVM_S390_VM_CPU_PROCESSOR_FEAT: 1952 case KVM_S390_VM_CPU_MACHINE_FEAT: 1953 case KVM_S390_VM_CPU_MACHINE_SUBFUNC: 1954 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC: 1955 ret = 0; 1956 break; 1957 default: 1958 ret = -ENXIO; 1959 break; 1960 } 1961 break; 1962 case KVM_S390_VM_CRYPTO: 1963 switch (attr->attr) { 1964 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: 1965 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: 1966 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: 1967 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: 1968 ret = 0; 1969 break; 1970 case KVM_S390_VM_CRYPTO_ENABLE_APIE: 1971 case KVM_S390_VM_CRYPTO_DISABLE_APIE: 1972 ret = ap_instructions_available() ? 0 : -ENXIO; 1973 break; 1974 default: 1975 ret = -ENXIO; 1976 break; 1977 } 1978 break; 1979 case KVM_S390_VM_MIGRATION: 1980 ret = 0; 1981 break; 1982 case KVM_S390_VM_CPU_TOPOLOGY: 1983 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; 1984 break; 1985 default: 1986 ret = -ENXIO; 1987 break; 1988 } 1989 1990 return ret; 1991 } 1992 1993 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) 1994 { 1995 uint8_t *keys; 1996 uint64_t hva; 1997 int srcu_idx, i, r = 0; 1998 1999 if (args->flags != 0) 2000 return -EINVAL; 2001 2002 /* Is this guest using storage keys? */ 2003 if (!mm_uses_skeys(current->mm)) 2004 return KVM_S390_GET_SKEYS_NONE; 2005 2006 /* Enforce sane limit on memory allocation */ 2007 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 2008 return -EINVAL; 2009 2010 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); 2011 if (!keys) 2012 return -ENOMEM; 2013 2014 mmap_read_lock(current->mm); 2015 srcu_idx = srcu_read_lock(&kvm->srcu); 2016 for (i = 0; i < args->count; i++) { 2017 hva = gfn_to_hva(kvm, args->start_gfn + i); 2018 if (kvm_is_error_hva(hva)) { 2019 r = -EFAULT; 2020 break; 2021 } 2022 2023 r = get_guest_storage_key(current->mm, hva, &keys[i]); 2024 if (r) 2025 break; 2026 } 2027 srcu_read_unlock(&kvm->srcu, srcu_idx); 2028 mmap_read_unlock(current->mm); 2029 2030 if (!r) { 2031 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, 2032 sizeof(uint8_t) * args->count); 2033 if (r) 2034 r = -EFAULT; 2035 } 2036 2037 kvfree(keys); 2038 return r; 2039 } 2040 2041 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) 2042 { 2043 uint8_t *keys; 2044 uint64_t hva; 2045 int srcu_idx, i, r = 0; 2046 bool unlocked; 2047 2048 if (args->flags != 0) 2049 return -EINVAL; 2050 2051 /* Enforce sane limit on memory allocation */ 2052 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 2053 return -EINVAL; 2054 2055 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); 2056 if (!keys) 2057 return -ENOMEM; 2058 2059 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, 2060 sizeof(uint8_t) * args->count); 2061 if (r) { 2062 r = -EFAULT; 2063 goto out; 2064 } 2065 2066 /* Enable storage key handling for the guest */ 2067 r = s390_enable_skey(); 2068 if (r) 2069 goto out; 2070 2071 i = 0; 2072 mmap_read_lock(current->mm); 2073 srcu_idx = 
srcu_read_lock(&kvm->srcu); 2074 while (i < args->count) { 2075 unlocked = false; 2076 hva = gfn_to_hva(kvm, args->start_gfn + i); 2077 if (kvm_is_error_hva(hva)) { 2078 r = -EFAULT; 2079 break; 2080 } 2081 2082 /* Lowest order bit is reserved */ 2083 if (keys[i] & 0x01) { 2084 r = -EINVAL; 2085 break; 2086 } 2087 2088 r = set_guest_storage_key(current->mm, hva, keys[i], 0); 2089 if (r) { 2090 r = fixup_user_fault(current->mm, hva, 2091 FAULT_FLAG_WRITE, &unlocked); 2092 if (r) 2093 break; 2094 } 2095 if (!r) 2096 i++; 2097 } 2098 srcu_read_unlock(&kvm->srcu, srcu_idx); 2099 mmap_read_unlock(current->mm); 2100 out: 2101 kvfree(keys); 2102 return r; 2103 } 2104 2105 /* 2106 * Base address and length must be sent at the start of each block, therefore 2107 * it's cheaper to send some clean data, as long as it's less than the size of 2108 * two longs. 2109 */ 2110 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *)) 2111 /* for consistency */ 2112 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX) 2113 2114 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, 2115 u8 *res, unsigned long bufsize) 2116 { 2117 unsigned long pgstev, hva, cur_gfn = args->start_gfn; 2118 2119 args->count = 0; 2120 while (args->count < bufsize) { 2121 hva = gfn_to_hva(kvm, cur_gfn); 2122 /* 2123 * We return an error if the first value was invalid, but we 2124 * return successfully if at least one value was copied. 2125 */ 2126 if (kvm_is_error_hva(hva)) 2127 return args->count ? 0 : -EFAULT; 2128 if (get_pgste(kvm->mm, hva, &pgstev) < 0) 2129 pgstev = 0; 2130 res[args->count++] = (pgstev >> 24) & 0x43; 2131 cur_gfn++; 2132 } 2133 2134 return 0; 2135 } 2136 2137 static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots, 2138 gfn_t gfn) 2139 { 2140 return ____gfn_to_memslot(slots, gfn, true); 2141 } 2142 2143 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots, 2144 unsigned long cur_gfn) 2145 { 2146 struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn); 2147 unsigned long ofs = cur_gfn - ms->base_gfn; 2148 struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; 2149 2150 if (ms->base_gfn + ms->npages <= cur_gfn) { 2151 mnode = rb_next(mnode); 2152 /* If we are above the highest slot, wrap around */ 2153 if (!mnode) 2154 mnode = rb_first(&slots->gfn_tree); 2155 2156 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); 2157 ofs = 0; 2158 } 2159 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); 2160 while (ofs >= ms->npages && (mnode = rb_next(mnode))) { 2161 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); 2162 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages); 2163 } 2164 return ms->base_gfn + ofs; 2165 } 2166 2167 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, 2168 u8 *res, unsigned long bufsize) 2169 { 2170 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev; 2171 struct kvm_memslots *slots = kvm_memslots(kvm); 2172 struct kvm_memory_slot *ms; 2173 2174 if (unlikely(kvm_memslots_empty(slots))) 2175 return 0; 2176 2177 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); 2178 ms = gfn_to_memslot(kvm, cur_gfn); 2179 args->count = 0; 2180 args->start_gfn = cur_gfn; 2181 if (!ms) 2182 return 0; 2183 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1); 2184 mem_end = kvm_s390_get_gfn_end(slots); 2185 2186 while (args->count < bufsize) { 2187 hva = gfn_to_hva(kvm, cur_gfn); 2188 if (kvm_is_error_hva(hva)) 2189 
return 0; 2190 /* Decrement only if we actually flipped the bit to 0 */ 2191 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) 2192 atomic64_dec(&kvm->arch.cmma_dirty_pages); 2193 if (get_pgste(kvm->mm, hva, &pgstev) < 0) 2194 pgstev = 0; 2195 /* Save the value */ 2196 res[args->count++] = (pgstev >> 24) & 0x43; 2197 /* If the next bit is too far away, stop. */ 2198 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE) 2199 return 0; 2200 /* If we reached the previous "next", find the next one */ 2201 if (cur_gfn == next_gfn) 2202 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1); 2203 /* Reached the end of memory or of the buffer, stop */ 2204 if ((next_gfn >= mem_end) || 2205 (next_gfn - args->start_gfn >= bufsize)) 2206 return 0; 2207 cur_gfn++; 2208 /* Reached the end of the current memslot, take the next one. */ 2209 if (cur_gfn - ms->base_gfn >= ms->npages) { 2210 ms = gfn_to_memslot(kvm, cur_gfn); 2211 if (!ms) 2212 return 0; 2213 } 2214 } 2215 return 0; 2216 } 2217 2218 /* 2219 * This function searches for the next page with dirty CMMA attributes, and 2220 * saves the attributes in the buffer up to either the end of the buffer or 2221 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found; 2222 * no trailing clean bytes are saved. 2223 * In case no dirty bits were found, or if CMMA was not enabled or used, the 2224 * output buffer will indicate 0 as length. 2225 */ 2226 static int kvm_s390_get_cmma_bits(struct kvm *kvm, 2227 struct kvm_s390_cmma_log *args) 2228 { 2229 unsigned long bufsize; 2230 int srcu_idx, peek, ret; 2231 u8 *values; 2232 2233 if (!kvm->arch.use_cmma) 2234 return -ENXIO; 2235 /* Invalid/unsupported flags were specified */ 2236 if (args->flags & ~KVM_S390_CMMA_PEEK) 2237 return -EINVAL; 2238 /* Migration mode query, and we are not doing a migration */ 2239 peek = !!(args->flags & KVM_S390_CMMA_PEEK); 2240 if (!peek && !kvm->arch.migration_mode) 2241 return -EINVAL; 2242 /* CMMA is disabled or was not used, or the buffer has length zero */ 2243 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); 2244 if (!bufsize || !kvm->mm->context.uses_cmm) { 2245 memset(args, 0, sizeof(*args)); 2246 return 0; 2247 } 2248 /* We are not peeking, and there are no dirty pages */ 2249 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { 2250 memset(args, 0, sizeof(*args)); 2251 return 0; 2252 } 2253 2254 values = vmalloc(bufsize); 2255 if (!values) 2256 return -ENOMEM; 2257 2258 mmap_read_lock(kvm->mm); 2259 srcu_idx = srcu_read_lock(&kvm->srcu); 2260 if (peek) 2261 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); 2262 else 2263 ret = kvm_s390_get_cmma(kvm, args, values, bufsize); 2264 srcu_read_unlock(&kvm->srcu, srcu_idx); 2265 mmap_read_unlock(kvm->mm); 2266 2267 if (kvm->arch.migration_mode) 2268 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); 2269 else 2270 args->remaining = 0; 2271 2272 if (copy_to_user((void __user *)args->values, values, args->count)) 2273 ret = -EFAULT; 2274 2275 vfree(values); 2276 return ret; 2277 } 2278 2279 /* 2280 * This function sets the CMMA attributes for the given pages. If the input 2281 * buffer has zero length, no action is taken, otherwise the attributes are 2282 * set and the mm->context.uses_cmm flag is set. 
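 * Each input byte is shifted to bit positions 24-31 of the PGSTE value;
 * only the bits selected by _PGSTE_GPS_USAGE_MASK and _PGSTE_GPS_NODAT
 * are changed, the rest of each PGSTE is left untouched.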
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);
	}
out:
	vfree(bits);
	return r;
}

/**
 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
 * non-protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Does not stop in case of error, tries to convert as many
 * CPUs as possible. In case of error, the RC and RRC of the first error are
 * returned.
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u16 _rc, _rrc;
	int ret = 0;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the rc and rrc of the first failure, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
			*rc = _rc;
			*rrc = _rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
	if (use_gisa)
		kvm_s390_gisa_enable(kvm);
	return ret;
}

/**
 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
 * to protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Tries to undo the conversion in case of error.
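 * (The undo path converts the already-created protected vCPUs back to
 * non-protected via kvm_s390_cpus_from_pv, discarding their rc/rrc.)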
2393 * 2394 * Return: 0 in case of success, otherwise -EIO 2395 */ 2396 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) 2397 { 2398 unsigned long i; 2399 int r = 0; 2400 u16 dummy; 2401 2402 struct kvm_vcpu *vcpu; 2403 2404 /* Disable the GISA if the ultravisor does not support AIV. */ 2405 if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications)) 2406 kvm_s390_gisa_disable(kvm); 2407 2408 kvm_for_each_vcpu(i, vcpu, kvm) { 2409 mutex_lock(&vcpu->mutex); 2410 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc); 2411 mutex_unlock(&vcpu->mutex); 2412 if (r) 2413 break; 2414 } 2415 if (r) 2416 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); 2417 return r; 2418 } 2419 2420 /* 2421 * Here we provide user space with a direct interface to query UV 2422 * related data like UV maxima and available features as well as 2423 * feature specific data. 2424 * 2425 * To facilitate future extension of the data structures we'll try to 2426 * write data up to the maximum requested length. 2427 */ 2428 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info) 2429 { 2430 ssize_t len_min; 2431 2432 switch (info->header.id) { 2433 case KVM_PV_INFO_VM: { 2434 len_min = sizeof(info->header) + sizeof(info->vm); 2435 2436 if (info->header.len_max < len_min) 2437 return -EINVAL; 2438 2439 memcpy(info->vm.inst_calls_list, 2440 uv_info.inst_calls_list, 2441 sizeof(uv_info.inst_calls_list)); 2442 2443 /* It's max cpuid not max cpus, so it's off by one */ 2444 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1; 2445 info->vm.max_guests = uv_info.max_num_sec_conf; 2446 info->vm.max_guest_addr = uv_info.max_sec_stor_addr; 2447 info->vm.feature_indication = uv_info.uv_feature_indications; 2448 2449 return len_min; 2450 } 2451 case KVM_PV_INFO_DUMP: { 2452 len_min = sizeof(info->header) + sizeof(info->dump); 2453 2454 if (info->header.len_max < len_min) 2455 return -EINVAL; 2456 2457 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len; 2458 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len; 2459 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len; 2460 return len_min; 2461 } 2462 default: 2463 return -EINVAL; 2464 } 2465 } 2466 2467 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd, 2468 struct kvm_s390_pv_dmp dmp) 2469 { 2470 int r = -EINVAL; 2471 void __user *result_buff = (void __user *)dmp.buff_addr; 2472 2473 switch (dmp.subcmd) { 2474 case KVM_PV_DUMP_INIT: { 2475 if (kvm->arch.pv.dumping) 2476 break; 2477 2478 /* 2479 * Block SIE entry as concurrent dump UVCs could lead 2480 * to validities. 2481 */ 2482 kvm_s390_vcpu_block_all(kvm); 2483 2484 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), 2485 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc); 2486 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x", 2487 cmd->rc, cmd->rrc); 2488 if (!r) { 2489 kvm->arch.pv.dumping = true; 2490 } else { 2491 kvm_s390_vcpu_unblock_all(kvm); 2492 r = -EINVAL; 2493 } 2494 break; 2495 } 2496 case KVM_PV_DUMP_CONFIG_STOR_STATE: { 2497 if (!kvm->arch.pv.dumping) 2498 break; 2499 2500 /* 2501 * gaddr is an output parameter since we might stop 2502 * early. As dmp will be copied back in our caller, we 2503 * don't need to do it ourselves. 
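		 * (On success, kvm_s390_handle_pv copies the whole
		 * kvm_s390_pv_dmp struct back to user space in its
		 * KVM_PV_DUMP case.)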
2504 */ 2505 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len, 2506 &cmd->rc, &cmd->rrc); 2507 break; 2508 } 2509 case KVM_PV_DUMP_COMPLETE: { 2510 if (!kvm->arch.pv.dumping) 2511 break; 2512 2513 r = -EINVAL; 2514 if (dmp.buff_len < uv_info.conf_dump_finalize_len) 2515 break; 2516 2517 r = kvm_s390_pv_dump_complete(kvm, result_buff, 2518 &cmd->rc, &cmd->rrc); 2519 break; 2520 } 2521 default: 2522 r = -ENOTTY; 2523 break; 2524 } 2525 2526 return r; 2527 } 2528 2529 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) 2530 { 2531 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); 2532 void __user *argp = (void __user *)cmd->data; 2533 int r = 0; 2534 u16 dummy; 2535 2536 if (need_lock) 2537 mutex_lock(&kvm->lock); 2538 2539 switch (cmd->cmd) { 2540 case KVM_PV_ENABLE: { 2541 r = -EINVAL; 2542 if (kvm_s390_pv_is_protected(kvm)) 2543 break; 2544 2545 /* 2546 * FMT 4 SIE needs esca. As we never switch back to bsca from 2547 * esca, we need no cleanup in the error cases below 2548 */ 2549 r = sca_switch_to_extended(kvm); 2550 if (r) 2551 break; 2552 2553 mmap_write_lock(current->mm); 2554 r = gmap_mark_unmergeable(); 2555 mmap_write_unlock(current->mm); 2556 if (r) 2557 break; 2558 2559 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); 2560 if (r) 2561 break; 2562 2563 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); 2564 if (r) 2565 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy); 2566 2567 /* we need to block service interrupts from now on */ 2568 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); 2569 break; 2570 } 2571 case KVM_PV_ASYNC_CLEANUP_PREPARE: 2572 r = -EINVAL; 2573 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy) 2574 break; 2575 2576 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); 2577 /* 2578 * If a CPU could not be destroyed, destroy VM will also fail. 2579 * There is no point in trying to destroy it. Instead return 2580 * the rc and rrc from the first CPU that failed destroying. 2581 */ 2582 if (r) 2583 break; 2584 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); 2585 2586 /* no need to block service interrupts any more */ 2587 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); 2588 break; 2589 case KVM_PV_ASYNC_CLEANUP_PERFORM: 2590 r = -EINVAL; 2591 if (!async_destroy) 2592 break; 2593 /* kvm->lock must not be held; this is asserted inside the function. */ 2594 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); 2595 break; 2596 case KVM_PV_DISABLE: { 2597 r = -EINVAL; 2598 if (!kvm_s390_pv_is_protected(kvm)) 2599 break; 2600 2601 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); 2602 /* 2603 * If a CPU could not be destroyed, destroy VM will also fail. 2604 * There is no point in trying to destroy it. Instead return 2605 * the rc and rrc from the first CPU that failed destroying. 
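		 * (Those are the values kvm_s390_cpus_from_pv stored in
		 * cmd->rc and cmd->rrc for the first vCPU that failed.)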
2606 */ 2607 if (r) 2608 break; 2609 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); 2610 2611 /* no need to block service interrupts any more */ 2612 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); 2613 break; 2614 } 2615 case KVM_PV_SET_SEC_PARMS: { 2616 struct kvm_s390_pv_sec_parm parms = {}; 2617 void *hdr; 2618 2619 r = -EINVAL; 2620 if (!kvm_s390_pv_is_protected(kvm)) 2621 break; 2622 2623 r = -EFAULT; 2624 if (copy_from_user(&parms, argp, sizeof(parms))) 2625 break; 2626 2627 /* Currently restricted to 8KB */ 2628 r = -EINVAL; 2629 if (parms.length > PAGE_SIZE * 2) 2630 break; 2631 2632 r = -ENOMEM; 2633 hdr = vmalloc(parms.length); 2634 if (!hdr) 2635 break; 2636 2637 r = -EFAULT; 2638 if (!copy_from_user(hdr, (void __user *)parms.origin, 2639 parms.length)) 2640 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length, 2641 &cmd->rc, &cmd->rrc); 2642 2643 vfree(hdr); 2644 break; 2645 } 2646 case KVM_PV_UNPACK: { 2647 struct kvm_s390_pv_unp unp = {}; 2648 2649 r = -EINVAL; 2650 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) 2651 break; 2652 2653 r = -EFAULT; 2654 if (copy_from_user(&unp, argp, sizeof(unp))) 2655 break; 2656 2657 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak, 2658 &cmd->rc, &cmd->rrc); 2659 break; 2660 } 2661 case KVM_PV_VERIFY: { 2662 r = -EINVAL; 2663 if (!kvm_s390_pv_is_protected(kvm)) 2664 break; 2665 2666 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), 2667 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); 2668 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, 2669 cmd->rrc); 2670 break; 2671 } 2672 case KVM_PV_PREP_RESET: { 2673 r = -EINVAL; 2674 if (!kvm_s390_pv_is_protected(kvm)) 2675 break; 2676 2677 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), 2678 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); 2679 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", 2680 cmd->rc, cmd->rrc); 2681 break; 2682 } 2683 case KVM_PV_UNSHARE_ALL: { 2684 r = -EINVAL; 2685 if (!kvm_s390_pv_is_protected(kvm)) 2686 break; 2687 2688 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), 2689 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); 2690 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", 2691 cmd->rc, cmd->rrc); 2692 break; 2693 } 2694 case KVM_PV_INFO: { 2695 struct kvm_s390_pv_info info = {}; 2696 ssize_t data_len; 2697 2698 /* 2699 * No need to check the VM protection here. 2700 * 2701 * Maybe user space wants to query some of the data 2702 * when the VM is still unprotected. If we see the 2703 * need to fence a new data command we can still 2704 * return an error in the info handler. 2705 */ 2706 2707 r = -EFAULT; 2708 if (copy_from_user(&info, argp, sizeof(info.header))) 2709 break; 2710 2711 r = -EINVAL; 2712 if (info.header.len_max < sizeof(info.header)) 2713 break; 2714 2715 data_len = kvm_s390_handle_pv_info(&info); 2716 if (data_len < 0) { 2717 r = data_len; 2718 break; 2719 } 2720 /* 2721 * If a data command struct is extended (multiple 2722 * times) this can be used to determine how much of it 2723 * is valid. 
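		 * A hypothetical user space sequence (illustrative sketch
		 * only, error handling omitted):
		 *
		 *	struct kvm_s390_pv_info info = {
		 *		.header.id = KVM_PV_INFO_VM,
		 *		.header.len_max = sizeof(info),
		 *	};
		 *	struct kvm_pv_cmd cmd = {
		 *		.cmd = KVM_PV_INFO,
		 *		.data = (__u64)(unsigned long)&info,
		 *	};
		 *
		 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
		 *	use only the first info.header.len_written bytes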
2724 */ 2725 info.header.len_written = data_len; 2726 2727 r = -EFAULT; 2728 if (copy_to_user(argp, &info, data_len)) 2729 break; 2730 2731 r = 0; 2732 break; 2733 } 2734 case KVM_PV_DUMP: { 2735 struct kvm_s390_pv_dmp dmp; 2736 2737 r = -EINVAL; 2738 if (!kvm_s390_pv_is_protected(kvm)) 2739 break; 2740 2741 r = -EFAULT; 2742 if (copy_from_user(&dmp, argp, sizeof(dmp))) 2743 break; 2744 2745 r = kvm_s390_pv_dmp(kvm, cmd, dmp); 2746 if (r) 2747 break; 2748 2749 if (copy_to_user(argp, &dmp, sizeof(dmp))) { 2750 r = -EFAULT; 2751 break; 2752 } 2753 2754 break; 2755 } 2756 default: 2757 r = -ENOTTY; 2758 } 2759 if (need_lock) 2760 mutex_unlock(&kvm->lock); 2761 2762 return r; 2763 } 2764 2765 static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags) 2766 { 2767 if (mop->flags & ~supported_flags || !mop->size) 2768 return -EINVAL; 2769 if (mop->size > MEM_OP_MAX_SIZE) 2770 return -E2BIG; 2771 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { 2772 if (mop->key > 0xf) 2773 return -EINVAL; 2774 } else { 2775 mop->key = 0; 2776 } 2777 return 0; 2778 } 2779 2780 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop) 2781 { 2782 void __user *uaddr = (void __user *)mop->buf; 2783 enum gacc_mode acc_mode; 2784 void *tmpbuf = NULL; 2785 int r, srcu_idx; 2786 2787 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION | 2788 KVM_S390_MEMOP_F_CHECK_ONLY); 2789 if (r) 2790 return r; 2791 2792 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { 2793 tmpbuf = vmalloc(mop->size); 2794 if (!tmpbuf) 2795 return -ENOMEM; 2796 } 2797 2798 srcu_idx = srcu_read_lock(&kvm->srcu); 2799 2800 if (kvm_is_error_gpa(kvm, mop->gaddr)) { 2801 r = PGM_ADDRESSING; 2802 goto out_unlock; 2803 } 2804 2805 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE; 2806 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { 2807 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); 2808 goto out_unlock; 2809 } 2810 if (acc_mode == GACC_FETCH) { 2811 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, 2812 mop->size, GACC_FETCH, mop->key); 2813 if (r) 2814 goto out_unlock; 2815 if (copy_to_user(uaddr, tmpbuf, mop->size)) 2816 r = -EFAULT; 2817 } else { 2818 if (copy_from_user(tmpbuf, uaddr, mop->size)) { 2819 r = -EFAULT; 2820 goto out_unlock; 2821 } 2822 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, 2823 mop->size, GACC_STORE, mop->key); 2824 } 2825 2826 out_unlock: 2827 srcu_read_unlock(&kvm->srcu, srcu_idx); 2828 2829 vfree(tmpbuf); 2830 return r; 2831 } 2832 2833 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop) 2834 { 2835 void __user *uaddr = (void __user *)mop->buf; 2836 void __user *old_addr = (void __user *)mop->old_addr; 2837 union { 2838 __uint128_t quad; 2839 char raw[sizeof(__uint128_t)]; 2840 } old = { .quad = 0}, new = { .quad = 0 }; 2841 unsigned int off_in_quad = sizeof(new) - mop->size; 2842 int r, srcu_idx; 2843 bool success; 2844 2845 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION); 2846 if (r) 2847 return r; 2848 /* 2849 * This validates off_in_quad. 
Checking that size is a power 2850 * of two is not necessary, as cmpxchg_guest_abs_with_key 2851 * takes care of that 2852 */ 2853 if (mop->size > sizeof(new)) 2854 return -EINVAL; 2855 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size)) 2856 return -EFAULT; 2857 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size)) 2858 return -EFAULT; 2859 2860 srcu_idx = srcu_read_lock(&kvm->srcu); 2861 2862 if (kvm_is_error_gpa(kvm, mop->gaddr)) { 2863 r = PGM_ADDRESSING; 2864 goto out_unlock; 2865 } 2866 2867 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, 2868 new.quad, mop->key, &success); 2869 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size)) 2870 r = -EFAULT; 2871 2872 out_unlock: 2873 srcu_read_unlock(&kvm->srcu, srcu_idx); 2874 return r; 2875 } 2876 2877 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) 2878 { 2879 /* 2880 * This is technically a heuristic only, if the kvm->lock is not 2881 * taken, it is not guaranteed that the vm is/remains non-protected. 2882 * This is ok from a kernel perspective, wrongdoing is detected 2883 * on the access, -EFAULT is returned and the vm may crash the 2884 * next time it accesses the memory in question. 2885 * There is no sane usecase to do switching and a memop on two 2886 * different CPUs at the same time. 2887 */ 2888 if (kvm_s390_pv_get_handle(kvm)) 2889 return -EINVAL; 2890 2891 switch (mop->op) { 2892 case KVM_S390_MEMOP_ABSOLUTE_READ: 2893 case KVM_S390_MEMOP_ABSOLUTE_WRITE: 2894 return kvm_s390_vm_mem_op_abs(kvm, mop); 2895 case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG: 2896 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop); 2897 default: 2898 return -EINVAL; 2899 } 2900 } 2901 2902 long kvm_arch_vm_ioctl(struct file *filp, 2903 unsigned int ioctl, unsigned long arg) 2904 { 2905 struct kvm *kvm = filp->private_data; 2906 void __user *argp = (void __user *)arg; 2907 struct kvm_device_attr attr; 2908 int r; 2909 2910 switch (ioctl) { 2911 case KVM_S390_INTERRUPT: { 2912 struct kvm_s390_interrupt s390int; 2913 2914 r = -EFAULT; 2915 if (copy_from_user(&s390int, argp, sizeof(s390int))) 2916 break; 2917 r = kvm_s390_inject_vm(kvm, &s390int); 2918 break; 2919 } 2920 case KVM_CREATE_IRQCHIP: { 2921 struct kvm_irq_routing_entry routing; 2922 2923 r = -EINVAL; 2924 if (kvm->arch.use_irqchip) { 2925 /* Set up dummy routing. 
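			 * An empty table is sufficient here; actual adapter
			 * routes, if any, are installed later by user space
			 * (e.g. via KVM_SET_GSI_ROUTING).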
*/ 2926 memset(&routing, 0, sizeof(routing)); 2927 r = kvm_set_irq_routing(kvm, &routing, 0, 0); 2928 } 2929 break; 2930 } 2931 case KVM_SET_DEVICE_ATTR: { 2932 r = -EFAULT; 2933 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2934 break; 2935 r = kvm_s390_vm_set_attr(kvm, &attr); 2936 break; 2937 } 2938 case KVM_GET_DEVICE_ATTR: { 2939 r = -EFAULT; 2940 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2941 break; 2942 r = kvm_s390_vm_get_attr(kvm, &attr); 2943 break; 2944 } 2945 case KVM_HAS_DEVICE_ATTR: { 2946 r = -EFAULT; 2947 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2948 break; 2949 r = kvm_s390_vm_has_attr(kvm, &attr); 2950 break; 2951 } 2952 case KVM_S390_GET_SKEYS: { 2953 struct kvm_s390_skeys args; 2954 2955 r = -EFAULT; 2956 if (copy_from_user(&args, argp, 2957 sizeof(struct kvm_s390_skeys))) 2958 break; 2959 r = kvm_s390_get_skeys(kvm, &args); 2960 break; 2961 } 2962 case KVM_S390_SET_SKEYS: { 2963 struct kvm_s390_skeys args; 2964 2965 r = -EFAULT; 2966 if (copy_from_user(&args, argp, 2967 sizeof(struct kvm_s390_skeys))) 2968 break; 2969 r = kvm_s390_set_skeys(kvm, &args); 2970 break; 2971 } 2972 case KVM_S390_GET_CMMA_BITS: { 2973 struct kvm_s390_cmma_log args; 2974 2975 r = -EFAULT; 2976 if (copy_from_user(&args, argp, sizeof(args))) 2977 break; 2978 mutex_lock(&kvm->slots_lock); 2979 r = kvm_s390_get_cmma_bits(kvm, &args); 2980 mutex_unlock(&kvm->slots_lock); 2981 if (!r) { 2982 r = copy_to_user(argp, &args, sizeof(args)); 2983 if (r) 2984 r = -EFAULT; 2985 } 2986 break; 2987 } 2988 case KVM_S390_SET_CMMA_BITS: { 2989 struct kvm_s390_cmma_log args; 2990 2991 r = -EFAULT; 2992 if (copy_from_user(&args, argp, sizeof(args))) 2993 break; 2994 mutex_lock(&kvm->slots_lock); 2995 r = kvm_s390_set_cmma_bits(kvm, &args); 2996 mutex_unlock(&kvm->slots_lock); 2997 break; 2998 } 2999 case KVM_S390_PV_COMMAND: { 3000 struct kvm_pv_cmd args; 3001 3002 /* protvirt means user cpu state */ 3003 kvm_s390_set_user_cpu_state_ctrl(kvm); 3004 r = 0; 3005 if (!is_prot_virt_host()) { 3006 r = -EINVAL; 3007 break; 3008 } 3009 if (copy_from_user(&args, argp, sizeof(args))) { 3010 r = -EFAULT; 3011 break; 3012 } 3013 if (args.flags) { 3014 r = -EINVAL; 3015 break; 3016 } 3017 /* must be called without kvm->lock */ 3018 r = kvm_s390_handle_pv(kvm, &args); 3019 if (copy_to_user(argp, &args, sizeof(args))) { 3020 r = -EFAULT; 3021 break; 3022 } 3023 break; 3024 } 3025 case KVM_S390_MEM_OP: { 3026 struct kvm_s390_mem_op mem_op; 3027 3028 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0) 3029 r = kvm_s390_vm_mem_op(kvm, &mem_op); 3030 else 3031 r = -EFAULT; 3032 break; 3033 } 3034 case KVM_S390_ZPCI_OP: { 3035 struct kvm_s390_zpci_op args; 3036 3037 r = -EINVAL; 3038 if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) 3039 break; 3040 if (copy_from_user(&args, argp, sizeof(args))) { 3041 r = -EFAULT; 3042 break; 3043 } 3044 r = kvm_s390_pci_zpci_op(kvm, &args); 3045 break; 3046 } 3047 default: 3048 r = -ENOTTY; 3049 } 3050 3051 return r; 3052 } 3053 3054 static int kvm_s390_apxa_installed(void) 3055 { 3056 struct ap_config_info info; 3057 3058 if (ap_instructions_available()) { 3059 if (ap_qci(&info) == 0) 3060 return info.apxa; 3061 } 3062 3063 return 0; 3064 } 3065 3066 /* 3067 * The format of the crypto control block (CRYCB) is specified in the 3 low 3068 * order bits of the CRYCB designation (CRYCBD) field as follows: 3069 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the 3070 * AP extended addressing (APXA) facility are installed. 
3071 * Format 1: The APXA facility is not installed but the MSAX3 facility is. 3072 * Format 2: Both the APXA and MSAX3 facilities are installed 3073 */ 3074 static void kvm_s390_set_crycb_format(struct kvm *kvm) 3075 { 3076 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; 3077 3078 /* Clear the CRYCB format bits - i.e., set format 0 by default */ 3079 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); 3080 3081 /* Check whether MSAX3 is installed */ 3082 if (!test_kvm_facility(kvm, 76)) 3083 return; 3084 3085 if (kvm_s390_apxa_installed()) 3086 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; 3087 else 3088 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; 3089 } 3090 3091 /* 3092 * kvm_arch_crypto_set_masks 3093 * 3094 * @kvm: pointer to the target guest's KVM struct containing the crypto masks 3095 * to be set. 3096 * @apm: the mask identifying the accessible AP adapters 3097 * @aqm: the mask identifying the accessible AP domains 3098 * @adm: the mask identifying the accessible AP control domains 3099 * 3100 * Set the masks that identify the adapters, domains and control domains to 3101 * which the KVM guest is granted access. 3102 * 3103 * Note: The kvm->lock mutex must be locked by the caller before invoking this 3104 * function. 3105 */ 3106 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, 3107 unsigned long *aqm, unsigned long *adm) 3108 { 3109 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; 3110 3111 kvm_s390_vcpu_block_all(kvm); 3112 3113 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { 3114 case CRYCB_FORMAT2: /* APCB1 use 256 bits */ 3115 memcpy(crycb->apcb1.apm, apm, 32); 3116 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", 3117 apm[0], apm[1], apm[2], apm[3]); 3118 memcpy(crycb->apcb1.aqm, aqm, 32); 3119 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", 3120 aqm[0], aqm[1], aqm[2], aqm[3]); 3121 memcpy(crycb->apcb1.adm, adm, 32); 3122 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", 3123 adm[0], adm[1], adm[2], adm[3]); 3124 break; 3125 case CRYCB_FORMAT1: 3126 case CRYCB_FORMAT0: /* Fall through both use APCB0 */ 3127 memcpy(crycb->apcb0.apm, apm, 8); 3128 memcpy(crycb->apcb0.aqm, aqm, 2); 3129 memcpy(crycb->apcb0.adm, adm, 2); 3130 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", 3131 apm[0], *((unsigned short *)aqm), 3132 *((unsigned short *)adm)); 3133 break; 3134 default: /* Can not happen */ 3135 break; 3136 } 3137 3138 /* recreate the shadow crycb for each vcpu */ 3139 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); 3140 kvm_s390_vcpu_unblock_all(kvm); 3141 } 3142 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks); 3143 3144 /* 3145 * kvm_arch_crypto_clear_masks 3146 * 3147 * @kvm: pointer to the target guest's KVM struct containing the crypto masks 3148 * to be cleared. 3149 * 3150 * Clear the masks that identify the adapters, domains and control domains to 3151 * which the KVM guest is granted access. 3152 * 3153 * Note: The kvm->lock mutex must be locked by the caller before invoking this 3154 * function. 
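 *
 * A minimal calling sketch (hypothetical caller, for illustration only):
 *
 *	mutex_lock(&kvm->lock);
 *	kvm_arch_crypto_clear_masks(kvm);
 *	mutex_unlock(&kvm->lock);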
3155 */ 3156 void kvm_arch_crypto_clear_masks(struct kvm *kvm) 3157 { 3158 kvm_s390_vcpu_block_all(kvm); 3159 3160 memset(&kvm->arch.crypto.crycb->apcb0, 0, 3161 sizeof(kvm->arch.crypto.crycb->apcb0)); 3162 memset(&kvm->arch.crypto.crycb->apcb1, 0, 3163 sizeof(kvm->arch.crypto.crycb->apcb1)); 3164 3165 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); 3166 /* recreate the shadow crycb for each vcpu */ 3167 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); 3168 kvm_s390_vcpu_unblock_all(kvm); 3169 } 3170 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks); 3171 3172 static u64 kvm_s390_get_initial_cpuid(void) 3173 { 3174 struct cpuid cpuid; 3175 3176 get_cpu_id(&cpuid); 3177 cpuid.version = 0xff; 3178 return *((u64 *) &cpuid); 3179 } 3180 3181 static void kvm_s390_crypto_init(struct kvm *kvm) 3182 { 3183 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; 3184 kvm_s390_set_crycb_format(kvm); 3185 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); 3186 3187 if (!test_kvm_facility(kvm, 76)) 3188 return; 3189 3190 /* Enable AES/DEA protected key functions by default */ 3191 kvm->arch.crypto.aes_kw = 1; 3192 kvm->arch.crypto.dea_kw = 1; 3193 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 3194 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 3195 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 3196 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 3197 } 3198 3199 static void sca_dispose(struct kvm *kvm) 3200 { 3201 if (kvm->arch.use_esca) 3202 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); 3203 else 3204 free_page((unsigned long)(kvm->arch.sca)); 3205 kvm->arch.sca = NULL; 3206 } 3207 3208 void kvm_arch_free_vm(struct kvm *kvm) 3209 { 3210 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) 3211 kvm_s390_pci_clear_list(kvm); 3212 3213 __kvm_arch_free_vm(kvm); 3214 } 3215 3216 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 3217 { 3218 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT; 3219 int i, rc; 3220 char debug_name[16]; 3221 static unsigned long sca_offset; 3222 3223 rc = -EINVAL; 3224 #ifdef CONFIG_KVM_S390_UCONTROL 3225 if (type & ~KVM_VM_S390_UCONTROL) 3226 goto out_err; 3227 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN))) 3228 goto out_err; 3229 #else 3230 if (type) 3231 goto out_err; 3232 #endif 3233 3234 rc = s390_enable_sie(); 3235 if (rc) 3236 goto out_err; 3237 3238 rc = -ENOMEM; 3239 3240 if (!sclp.has_64bscao) 3241 alloc_flags |= GFP_DMA; 3242 rwlock_init(&kvm->arch.sca_lock); 3243 /* start with basic SCA */ 3244 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); 3245 if (!kvm->arch.sca) 3246 goto out_err; 3247 mutex_lock(&kvm_lock); 3248 sca_offset += 16; 3249 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE) 3250 sca_offset = 0; 3251 kvm->arch.sca = (struct bsca_block *) 3252 ((char *) kvm->arch.sca + sca_offset); 3253 mutex_unlock(&kvm_lock); 3254 3255 sprintf(debug_name, "kvm-%u", current->pid); 3256 3257 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); 3258 if (!kvm->arch.dbf) 3259 goto out_err; 3260 3261 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096); 3262 kvm->arch.sie_page2 = 3263 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA); 3264 if (!kvm->arch.sie_page2) 3265 goto out_err; 3266 3267 kvm->arch.sie_page2->kvm = kvm; 3268 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; 3269 3270 for (i = 0; i < kvm_s390_fac_size(); i++) { 3271 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & 3272 (kvm_s390_fac_base[i] | 3273 kvm_s390_fac_ext[i]); 3274 
kvm->arch.model.fac_list[i] = stfle_fac_list[i] & 3275 kvm_s390_fac_base[i]; 3276 } 3277 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; 3278 3279 /* we are always in czam mode - even on pre z14 machines */ 3280 set_kvm_facility(kvm->arch.model.fac_mask, 138); 3281 set_kvm_facility(kvm->arch.model.fac_list, 138); 3282 /* we emulate STHYI in kvm */ 3283 set_kvm_facility(kvm->arch.model.fac_mask, 74); 3284 set_kvm_facility(kvm->arch.model.fac_list, 74); 3285 if (MACHINE_HAS_TLB_GUEST) { 3286 set_kvm_facility(kvm->arch.model.fac_mask, 147); 3287 set_kvm_facility(kvm->arch.model.fac_list, 147); 3288 } 3289 3290 if (css_general_characteristics.aiv && test_facility(65)) 3291 set_kvm_facility(kvm->arch.model.fac_mask, 65); 3292 3293 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); 3294 kvm->arch.model.ibc = sclp.ibc & 0x0fff; 3295 3296 kvm_s390_crypto_init(kvm); 3297 3298 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) { 3299 mutex_lock(&kvm->lock); 3300 kvm_s390_pci_init_list(kvm); 3301 kvm_s390_vcpu_pci_enable_interp(kvm); 3302 mutex_unlock(&kvm->lock); 3303 } 3304 3305 mutex_init(&kvm->arch.float_int.ais_lock); 3306 spin_lock_init(&kvm->arch.float_int.lock); 3307 for (i = 0; i < FIRQ_LIST_COUNT; i++) 3308 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); 3309 init_waitqueue_head(&kvm->arch.ipte_wq); 3310 mutex_init(&kvm->arch.ipte_mutex); 3311 3312 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 3313 VM_EVENT(kvm, 3, "vm created with type %lu", type); 3314 3315 if (type & KVM_VM_S390_UCONTROL) { 3316 kvm->arch.gmap = NULL; 3317 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; 3318 } else { 3319 if (sclp.hamax == U64_MAX) 3320 kvm->arch.mem_limit = TASK_SIZE_MAX; 3321 else 3322 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, 3323 sclp.hamax + 1); 3324 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); 3325 if (!kvm->arch.gmap) 3326 goto out_err; 3327 kvm->arch.gmap->private = kvm; 3328 kvm->arch.gmap->pfault_enabled = 0; 3329 } 3330 3331 kvm->arch.use_pfmfi = sclp.has_pfmfi; 3332 kvm->arch.use_skf = sclp.has_skey; 3333 spin_lock_init(&kvm->arch.start_stop_lock); 3334 kvm_s390_vsie_init(kvm); 3335 if (use_gisa) 3336 kvm_s390_gisa_init(kvm); 3337 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); 3338 kvm->arch.pv.set_aside = NULL; 3339 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); 3340 3341 return 0; 3342 out_err: 3343 free_page((unsigned long)kvm->arch.sie_page2); 3344 debug_unregister(kvm->arch.dbf); 3345 sca_dispose(kvm); 3346 KVM_EVENT(3, "creation of vm failed: %d", rc); 3347 return rc; 3348 } 3349 3350 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 3351 { 3352 u16 rc, rrc; 3353 3354 VCPU_EVENT(vcpu, 3, "%s", "free cpu"); 3355 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); 3356 kvm_s390_clear_local_irqs(vcpu); 3357 kvm_clear_async_pf_completion_queue(vcpu); 3358 if (!kvm_is_ucontrol(vcpu->kvm)) 3359 sca_del_vcpu(vcpu); 3360 kvm_s390_update_topology_change_report(vcpu->kvm, 1); 3361 3362 if (kvm_is_ucontrol(vcpu->kvm)) 3363 gmap_remove(vcpu->arch.gmap); 3364 3365 if (vcpu->kvm->arch.use_cmma) 3366 kvm_s390_vcpu_unsetup_cmma(vcpu); 3367 /* We can not hold the vcpu mutex here, we are already dying */ 3368 if (kvm_s390_pv_cpu_get_handle(vcpu)) 3369 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc); 3370 free_page((unsigned long)(vcpu->arch.sie_block)); 3371 } 3372 3373 void kvm_arch_destroy_vm(struct kvm *kvm) 3374 { 3375 u16 rc, rrc; 3376 3377 kvm_destroy_vcpus(kvm); 3378 sca_dispose(kvm); 3379 kvm_s390_gisa_destroy(kvm); 3380 /* 3381 * We are already at the 
end of life and kvm->lock is not taken.
	 * This is ok as the file descriptor is closed by now and nobody
	 * can mess with the pv state.
	 */
	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
	/*
	 * Remove the mmu notifier only when the whole KVM VM is torn down,
	 * and only if one was registered to begin with. If the VM is
	 * currently not protected, but was previously protected,
	 * then it's possible that the notifier is still registered.
	 */
	if (kvm->arch.pv.mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);

	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		phys_addr_t sca_phys = virt_to_phys(sca);

		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		phys_addr_t sca_phys = virt_to_phys(sca);

		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
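
/*
 * Note on the BSCA -> ESCA transition implemented below: all vCPUs are
 * blocked and the sca_lock is held for writing while every SIE control
 * block is repointed to the new extended SCA, so no CPU can run with a
 * stale SCA origin. The rough sequence is:
 *   1. allocate and zero a new esca_block
 *   2. block all vCPUs, take sca_lock for writing
 *   3. copy ipte control, MCN and per-CPU entries from the bsca
 *   4. set scaoh/scaol and ECB2_ESCA in every SIE block
 *   5. publish the new SCA, unlock, unblock, free the old page
 */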
3484 3485 static int sca_switch_to_extended(struct kvm *kvm) 3486 { 3487 struct bsca_block *old_sca = kvm->arch.sca; 3488 struct esca_block *new_sca; 3489 struct kvm_vcpu *vcpu; 3490 unsigned long vcpu_idx; 3491 u32 scaol, scaoh; 3492 phys_addr_t new_sca_phys; 3493 3494 if (kvm->arch.use_esca) 3495 return 0; 3496 3497 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3498 if (!new_sca) 3499 return -ENOMEM; 3500 3501 new_sca_phys = virt_to_phys(new_sca); 3502 scaoh = new_sca_phys >> 32; 3503 scaol = new_sca_phys & ESCA_SCAOL_MASK; 3504 3505 kvm_s390_vcpu_block_all(kvm); 3506 write_lock(&kvm->arch.sca_lock); 3507 3508 sca_copy_b_to_e(new_sca, old_sca); 3509 3510 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { 3511 vcpu->arch.sie_block->scaoh = scaoh; 3512 vcpu->arch.sie_block->scaol = scaol; 3513 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; 3514 } 3515 kvm->arch.sca = new_sca; 3516 kvm->arch.use_esca = 1; 3517 3518 write_unlock(&kvm->arch.sca_lock); 3519 kvm_s390_vcpu_unblock_all(kvm); 3520 3521 free_page((unsigned long)old_sca); 3522 3523 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", 3524 old_sca, kvm->arch.sca); 3525 return 0; 3526 } 3527 3528 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) 3529 { 3530 int rc; 3531 3532 if (!kvm_s390_use_sca_entries()) { 3533 if (id < KVM_MAX_VCPUS) 3534 return true; 3535 return false; 3536 } 3537 if (id < KVM_S390_BSCA_CPU_SLOTS) 3538 return true; 3539 if (!sclp.has_esca || !sclp.has_64bscao) 3540 return false; 3541 3542 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); 3543 3544 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS; 3545 } 3546 3547 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ 3548 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu) 3549 { 3550 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); 3551 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); 3552 vcpu->arch.cputm_start = get_tod_clock_fast(); 3553 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); 3554 } 3555 3556 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ 3557 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu) 3558 { 3559 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); 3560 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); 3561 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; 3562 vcpu->arch.cputm_start = 0; 3563 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); 3564 } 3565 3566 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ 3567 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu) 3568 { 3569 WARN_ON_ONCE(vcpu->arch.cputm_enabled); 3570 vcpu->arch.cputm_enabled = true; 3571 __start_cpu_timer_accounting(vcpu); 3572 } 3573 3574 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ 3575 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu) 3576 { 3577 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); 3578 __stop_cpu_timer_accounting(vcpu); 3579 vcpu->arch.cputm_enabled = false; 3580 } 3581 3582 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu) 3583 { 3584 preempt_disable(); /* protect from TOD sync and vcpu_load/put */ 3585 __enable_cpu_timer_accounting(vcpu); 3586 preempt_enable(); 3587 } 3588 3589 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu) 3590 { 3591 preempt_disable(); /* protect from TOD sync and vcpu_load/put */ 3592 __disable_cpu_timer_accounting(vcpu); 3593 preempt_enable(); 3594 } 3595 3596 /* set the cpu timer - may only be 
called from the VCPU thread itself */ 3597 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm) 3598 { 3599 preempt_disable(); /* protect from TOD sync and vcpu_load/put */ 3600 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); 3601 if (vcpu->arch.cputm_enabled) 3602 vcpu->arch.cputm_start = get_tod_clock_fast(); 3603 vcpu->arch.sie_block->cputm = cputm; 3604 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); 3605 preempt_enable(); 3606 } 3607 3608 /* update and get the cpu timer - can also be called from other VCPU threads */ 3609 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu) 3610 { 3611 unsigned int seq; 3612 __u64 value; 3613 3614 if (unlikely(!vcpu->arch.cputm_enabled)) 3615 return vcpu->arch.sie_block->cputm; 3616 3617 preempt_disable(); /* protect from TOD sync and vcpu_load/put */ 3618 do { 3619 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); 3620 /* 3621 * If the writer would ever execute a read in the critical 3622 * section, e.g. in irq context, we have a deadlock. 3623 */ 3624 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); 3625 value = vcpu->arch.sie_block->cputm; 3626 /* if cputm_start is 0, accounting is being started/stopped */ 3627 if (likely(vcpu->arch.cputm_start)) 3628 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; 3629 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); 3630 preempt_enable(); 3631 return value; 3632 } 3633 3634 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 3635 { 3636 3637 gmap_enable(vcpu->arch.enabled_gmap); 3638 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING); 3639 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) 3640 __start_cpu_timer_accounting(vcpu); 3641 vcpu->cpu = cpu; 3642 } 3643 3644 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 3645 { 3646 vcpu->cpu = -1; 3647 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) 3648 __stop_cpu_timer_accounting(vcpu); 3649 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING); 3650 vcpu->arch.enabled_gmap = gmap_get_enabled(); 3651 gmap_disable(vcpu->arch.enabled_gmap); 3652 3653 } 3654 3655 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 3656 { 3657 mutex_lock(&vcpu->kvm->lock); 3658 preempt_disable(); 3659 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; 3660 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; 3661 preempt_enable(); 3662 mutex_unlock(&vcpu->kvm->lock); 3663 if (!kvm_is_ucontrol(vcpu->kvm)) { 3664 vcpu->arch.gmap = vcpu->kvm->arch.gmap; 3665 sca_add_vcpu(vcpu); 3666 } 3667 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) 3668 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; 3669 /* make vcpu_load load the right gmap on the first trigger */ 3670 vcpu->arch.enabled_gmap = vcpu->arch.gmap; 3671 } 3672 3673 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) 3674 { 3675 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && 3676 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo)) 3677 return true; 3678 return false; 3679 } 3680 3681 static bool kvm_has_pckmo_ecc(struct kvm *kvm) 3682 { 3683 /* At least one ECC subfunction must be present */ 3684 return kvm_has_pckmo_subfunc(kvm, 32) || 3685 kvm_has_pckmo_subfunc(kvm, 33) || 3686 kvm_has_pckmo_subfunc(kvm, 34) || 3687 kvm_has_pckmo_subfunc(kvm, 40) || 3688 kvm_has_pckmo_subfunc(kvm, 41); 3689 3690 } 3691 3692 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) 3693 { 3694 /* 3695 * If the AP instructions are not being interpreted and the MSAX3 3696 * facility is not configured for the guest, 
there is nothing to set up. 3697 */ 3698 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) 3699 return; 3700 3701 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; 3702 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); 3703 vcpu->arch.sie_block->eca &= ~ECA_APIE; 3704 vcpu->arch.sie_block->ecd &= ~ECD_ECC; 3705 3706 if (vcpu->kvm->arch.crypto.apie) 3707 vcpu->arch.sie_block->eca |= ECA_APIE; 3708 3709 /* Set up protected key support */ 3710 if (vcpu->kvm->arch.crypto.aes_kw) { 3711 vcpu->arch.sie_block->ecb3 |= ECB3_AES; 3712 /* ecc is also wrapped with AES key */ 3713 if (kvm_has_pckmo_ecc(vcpu->kvm)) 3714 vcpu->arch.sie_block->ecd |= ECD_ECC; 3715 } 3716 3717 if (vcpu->kvm->arch.crypto.dea_kw) 3718 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; 3719 } 3720 3721 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) 3722 { 3723 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); 3724 vcpu->arch.sie_block->cbrlo = 0; 3725 } 3726 3727 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) 3728 { 3729 void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); 3730 3731 if (!cbrlo_page) 3732 return -ENOMEM; 3733 3734 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); 3735 return 0; 3736 } 3737 3738 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) 3739 { 3740 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; 3741 3742 vcpu->arch.sie_block->ibc = model->ibc; 3743 if (test_kvm_facility(vcpu->kvm, 7)) 3744 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); 3745 } 3746 3747 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu) 3748 { 3749 int rc = 0; 3750 u16 uvrc, uvrrc; 3751 3752 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | 3753 CPUSTAT_SM | 3754 CPUSTAT_STOPPED); 3755 3756 if (test_kvm_facility(vcpu->kvm, 78)) 3757 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2); 3758 else if (test_kvm_facility(vcpu->kvm, 8)) 3759 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED); 3760 3761 kvm_s390_vcpu_setup_model(vcpu); 3762 3763 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */ 3764 if (MACHINE_HAS_ESOP) 3765 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; 3766 if (test_kvm_facility(vcpu->kvm, 9)) 3767 vcpu->arch.sie_block->ecb |= ECB_SRSI; 3768 if (test_kvm_facility(vcpu->kvm, 11)) 3769 vcpu->arch.sie_block->ecb |= ECB_PTF; 3770 if (test_kvm_facility(vcpu->kvm, 73)) 3771 vcpu->arch.sie_block->ecb |= ECB_TE; 3772 if (!kvm_is_ucontrol(vcpu->kvm)) 3773 vcpu->arch.sie_block->ecb |= ECB_SPECI; 3774 3775 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) 3776 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; 3777 if (test_kvm_facility(vcpu->kvm, 130)) 3778 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; 3779 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; 3780 if (sclp.has_cei) 3781 vcpu->arch.sie_block->eca |= ECA_CEI; 3782 if (sclp.has_ib) 3783 vcpu->arch.sie_block->eca |= ECA_IB; 3784 if (sclp.has_siif) 3785 vcpu->arch.sie_block->eca |= ECA_SII; 3786 if (sclp.has_sigpif) 3787 vcpu->arch.sie_block->eca |= ECA_SIGPI; 3788 if (test_kvm_facility(vcpu->kvm, 129)) { 3789 vcpu->arch.sie_block->eca |= ECA_VX; 3790 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 3791 } 3792 if (test_kvm_facility(vcpu->kvm, 139)) 3793 vcpu->arch.sie_block->ecd |= ECD_MEF; 3794 if (test_kvm_facility(vcpu->kvm, 156)) 3795 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; 3796 if (vcpu->arch.sie_block->gd) { 3797 vcpu->arch.sie_block->eca |= ECA_AIV; 3798 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", 3799 vcpu->arch.sie_block->gd & 
0x3, vcpu->vcpu_id); 3800 } 3801 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; 3802 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); 3803 3804 if (sclp.has_kss) 3805 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS); 3806 else 3807 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; 3808 3809 if (vcpu->kvm->arch.use_cmma) { 3810 rc = kvm_s390_vcpu_setup_cmma(vcpu); 3811 if (rc) 3812 return rc; 3813 } 3814 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3815 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; 3816 3817 vcpu->arch.sie_block->hpid = HPID_KVM; 3818 3819 kvm_s390_vcpu_crypto_setup(vcpu); 3820 3821 kvm_s390_vcpu_pci_setup(vcpu); 3822 3823 mutex_lock(&vcpu->kvm->lock); 3824 if (kvm_s390_pv_is_protected(vcpu->kvm)) { 3825 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc); 3826 if (rc) 3827 kvm_s390_vcpu_unsetup_cmma(vcpu); 3828 } 3829 mutex_unlock(&vcpu->kvm->lock); 3830 3831 return rc; 3832 } 3833 3834 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 3835 { 3836 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) 3837 return -EINVAL; 3838 return 0; 3839 } 3840 3841 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 3842 { 3843 struct sie_page *sie_page; 3844 int rc; 3845 3846 BUILD_BUG_ON(sizeof(struct sie_page) != 4096); 3847 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT); 3848 if (!sie_page) 3849 return -ENOMEM; 3850 3851 vcpu->arch.sie_block = &sie_page->sie_block; 3852 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); 3853 3854 /* the real guest size will always be smaller than msl */ 3855 vcpu->arch.sie_block->mso = 0; 3856 vcpu->arch.sie_block->msl = sclp.hamax; 3857 3858 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; 3859 spin_lock_init(&vcpu->arch.local_int.lock); 3860 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); 3861 seqcount_init(&vcpu->arch.cputm_seqcount); 3862 3863 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 3864 kvm_clear_async_pf_completion_queue(vcpu); 3865 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | 3866 KVM_SYNC_GPRS | 3867 KVM_SYNC_ACRS | 3868 KVM_SYNC_CRS | 3869 KVM_SYNC_ARCH0 | 3870 KVM_SYNC_PFAULT | 3871 KVM_SYNC_DIAG318; 3872 kvm_s390_set_prefix(vcpu, 0); 3873 if (test_kvm_facility(vcpu->kvm, 64)) 3874 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; 3875 if (test_kvm_facility(vcpu->kvm, 82)) 3876 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; 3877 if (test_kvm_facility(vcpu->kvm, 133)) 3878 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; 3879 if (test_kvm_facility(vcpu->kvm, 156)) 3880 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; 3881 /* fprs can be synchronized via vrs, even if the guest has no vx. With 3882 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format. 
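 * (That is why only one of KVM_SYNC_VRS and KVM_SYNC_FPRS is advertised
 * below, depending on MACHINE_HAS_VX.)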
3883 */ 3884 if (MACHINE_HAS_VX) 3885 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; 3886 else 3887 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; 3888 3889 if (kvm_is_ucontrol(vcpu->kvm)) { 3890 rc = __kvm_ucontrol_vcpu_init(vcpu); 3891 if (rc) 3892 goto out_free_sie_block; 3893 } 3894 3895 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", 3896 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); 3897 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); 3898 3899 rc = kvm_s390_vcpu_setup(vcpu); 3900 if (rc) 3901 goto out_ucontrol_uninit; 3902 3903 kvm_s390_update_topology_change_report(vcpu->kvm, 1); 3904 return 0; 3905 3906 out_ucontrol_uninit: 3907 if (kvm_is_ucontrol(vcpu->kvm)) 3908 gmap_remove(vcpu->arch.gmap); 3909 out_free_sie_block: 3910 free_page((unsigned long)(vcpu->arch.sie_block)); 3911 return rc; 3912 } 3913 3914 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 3915 { 3916 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); 3917 return kvm_s390_vcpu_has_irq(vcpu, 0); 3918 } 3919 3920 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 3921 { 3922 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); 3923 } 3924 3925 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) 3926 { 3927 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 3928 exit_sie(vcpu); 3929 } 3930 3931 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) 3932 { 3933 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 3934 } 3935 3936 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) 3937 { 3938 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 3939 exit_sie(vcpu); 3940 } 3941 3942 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu) 3943 { 3944 return atomic_read(&vcpu->arch.sie_block->prog20) & 3945 (PROG_BLOCK_SIE | PROG_REQUEST); 3946 } 3947 3948 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) 3949 { 3950 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 3951 } 3952 3953 /* 3954 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running. 3955 * If the CPU is not running (e.g. waiting as idle) the function will 3956 * return immediately. 
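 * (The kick works by setting the CPUSTAT_STOP_INT flag and, for the nested
 * case, via kvm_s390_vsie_kick(); the busy-wait on PROG_IN_SIE in prog0c
 * guarantees SIE has actually been left before we return.)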
*/ 3957 void exit_sie(struct kvm_vcpu *vcpu) 3958 { 3959 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT); 3960 kvm_s390_vsie_kick(vcpu); 3961 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) 3962 cpu_relax(); 3963 } 3964 3965 /* Kick a guest cpu out of SIE to process a request synchronously */ 3966 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu) 3967 { 3968 __kvm_make_request(req, vcpu); 3969 kvm_s390_vcpu_request(vcpu); 3970 } 3971 3972 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, 3973 unsigned long end) 3974 { 3975 struct kvm *kvm = gmap->private; 3976 struct kvm_vcpu *vcpu; 3977 unsigned long prefix; 3978 unsigned long i; 3979 3980 if (gmap_is_shadow(gmap)) 3981 return; 3982 if (start >= 1UL << 31) 3983 /* We are only interested in prefix pages */ 3984 return; 3985 kvm_for_each_vcpu(i, vcpu, kvm) { 3986 /* match against both prefix pages */ 3987 prefix = kvm_s390_get_prefix(vcpu); 3988 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { 3989 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", 3990 start, end); 3991 kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu); 3992 } 3993 } 3994 } 3995 3996 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 3997 { 3998 /* do not poll with more than halt_poll_max_steal percent of steal time */ 3999 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >= 4000 READ_ONCE(halt_poll_max_steal)) { 4001 vcpu->stat.halt_no_poll_steal++; 4002 return true; 4003 } 4004 return false; 4005 } 4006 4007 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 4008 { 4009 /* kvm common code refers to this, but never calls it */ 4010 BUG(); 4011 return 0; 4012 } 4013 4014 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 4015 struct kvm_one_reg *reg) 4016 { 4017 int r = -EINVAL; 4018 4019 switch (reg->id) { 4020 case KVM_REG_S390_TODPR: 4021 r = put_user(vcpu->arch.sie_block->todpr, 4022 (u32 __user *)reg->addr); 4023 break; 4024 case KVM_REG_S390_EPOCHDIFF: 4025 r = put_user(vcpu->arch.sie_block->epoch, 4026 (u64 __user *)reg->addr); 4027 break; 4028 case KVM_REG_S390_CPU_TIMER: 4029 r = put_user(kvm_s390_get_cpu_timer(vcpu), 4030 (u64 __user *)reg->addr); 4031 break; 4032 case KVM_REG_S390_CLOCK_COMP: 4033 r = put_user(vcpu->arch.sie_block->ckc, 4034 (u64 __user *)reg->addr); 4035 break; 4036 case KVM_REG_S390_PFTOKEN: 4037 r = put_user(vcpu->arch.pfault_token, 4038 (u64 __user *)reg->addr); 4039 break; 4040 case KVM_REG_S390_PFCOMPARE: 4041 r = put_user(vcpu->arch.pfault_compare, 4042 (u64 __user *)reg->addr); 4043 break; 4044 case KVM_REG_S390_PFSELECT: 4045 r = put_user(vcpu->arch.pfault_select, 4046 (u64 __user *)reg->addr); 4047 break; 4048 case KVM_REG_S390_PP: 4049 r = put_user(vcpu->arch.sie_block->pp, 4050 (u64 __user *)reg->addr); 4051 break; 4052 case KVM_REG_S390_GBEA: 4053 r = put_user(vcpu->arch.sie_block->gbea, 4054 (u64 __user *)reg->addr); 4055 break; 4056 default: 4057 break; 4058 } 4059 4060 return r; 4061 } 4062 4063 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 4064 struct kvm_one_reg *reg) 4065 { 4066 int r = -EINVAL; 4067 __u64 val; 4068 4069 switch (reg->id) { 4070 case KVM_REG_S390_TODPR: 4071 r = get_user(vcpu->arch.sie_block->todpr, 4072 (u32 __user *)reg->addr); 4073 break; 4074 case KVM_REG_S390_EPOCHDIFF: 4075 r = get_user(vcpu->arch.sie_block->epoch, 4076 (u64 __user *)reg->addr); 4077 break; 4078 case KVM_REG_S390_CPU_TIMER: 4079 r = get_user(val, (u64 __user *)reg->addr); 4080 if (!r) 4081 kvm_s390_set_cpu_timer(vcpu, val); 4082 break; 4083 case 
KVM_REG_S390_CLOCK_COMP: 4084 r = get_user(vcpu->arch.sie_block->ckc, 4085 (u64 __user *)reg->addr); 4086 break; 4087 case KVM_REG_S390_PFTOKEN: 4088 r = get_user(vcpu->arch.pfault_token, 4089 (u64 __user *)reg->addr); 4090 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 4091 kvm_clear_async_pf_completion_queue(vcpu); 4092 break; 4093 case KVM_REG_S390_PFCOMPARE: 4094 r = get_user(vcpu->arch.pfault_compare, 4095 (u64 __user *)reg->addr); 4096 break; 4097 case KVM_REG_S390_PFSELECT: 4098 r = get_user(vcpu->arch.pfault_select, 4099 (u64 __user *)reg->addr); 4100 break; 4101 case KVM_REG_S390_PP: 4102 r = get_user(vcpu->arch.sie_block->pp, 4103 (u64 __user *)reg->addr); 4104 break; 4105 case KVM_REG_S390_GBEA: 4106 r = get_user(vcpu->arch.sie_block->gbea, 4107 (u64 __user *)reg->addr); 4108 break; 4109 default: 4110 break; 4111 } 4112 4113 return r; 4114 } 4115 4116 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu) 4117 { 4118 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; 4119 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 4120 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); 4121 4122 kvm_clear_async_pf_completion_queue(vcpu); 4123 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) 4124 kvm_s390_vcpu_stop(vcpu); 4125 kvm_s390_clear_local_irqs(vcpu); 4126 } 4127 4128 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) 4129 { 4130 /* Initial reset is a superset of the normal reset */ 4131 kvm_arch_vcpu_ioctl_normal_reset(vcpu); 4132 4133 /* 4134 * This equals initial cpu reset in pop, but we don't switch to ESA. 4135 * We do not only reset the internal data, but also ... 4136 */ 4137 vcpu->arch.sie_block->gpsw.mask = 0; 4138 vcpu->arch.sie_block->gpsw.addr = 0; 4139 kvm_s390_set_prefix(vcpu, 0); 4140 kvm_s390_set_cpu_timer(vcpu, 0); 4141 vcpu->arch.sie_block->ckc = 0; 4142 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); 4143 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; 4144 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; 4145 4146 /* ... the data in sync regs */ 4147 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); 4148 vcpu->run->s.regs.ckc = 0; 4149 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; 4150 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; 4151 vcpu->run->psw_addr = 0; 4152 vcpu->run->psw_mask = 0; 4153 vcpu->run->s.regs.todpr = 0; 4154 vcpu->run->s.regs.cputm = 0; 4155 vcpu->run->s.regs.ckc = 0; 4156 vcpu->run->s.regs.pp = 0; 4157 vcpu->run->s.regs.gbea = 1; 4158 vcpu->run->s.regs.fpc = 0; 4159 /* 4160 * Do not reset these registers in the protected case, as some of 4161 * them are overlayed and they are not accessible in this case 4162 * anyway. 
4163 */ 4164 if (!kvm_s390_pv_cpu_is_protected(vcpu)) { 4165 vcpu->arch.sie_block->gbea = 1; 4166 vcpu->arch.sie_block->pp = 0; 4167 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; 4168 vcpu->arch.sie_block->todpr = 0; 4169 } 4170 } 4171 4172 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu) 4173 { 4174 struct kvm_sync_regs *regs = &vcpu->run->s.regs; 4175 4176 /* Clear reset is a superset of the initial reset */ 4177 kvm_arch_vcpu_ioctl_initial_reset(vcpu); 4178 4179 memset(&regs->gprs, 0, sizeof(regs->gprs)); 4180 memset(&regs->vrs, 0, sizeof(regs->vrs)); 4181 memset(&regs->acrs, 0, sizeof(regs->acrs)); 4182 memset(&regs->gscb, 0, sizeof(regs->gscb)); 4183 4184 regs->etoken = 0; 4185 regs->etoken_extension = 0; 4186 } 4187 4188 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 4189 { 4190 vcpu_load(vcpu); 4191 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); 4192 vcpu_put(vcpu); 4193 return 0; 4194 } 4195 4196 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 4197 { 4198 vcpu_load(vcpu); 4199 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); 4200 vcpu_put(vcpu); 4201 return 0; 4202 } 4203 4204 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 4205 struct kvm_sregs *sregs) 4206 { 4207 vcpu_load(vcpu); 4208 4209 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); 4210 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); 4211 4212 vcpu_put(vcpu); 4213 return 0; 4214 } 4215 4216 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 4217 struct kvm_sregs *sregs) 4218 { 4219 vcpu_load(vcpu); 4220 4221 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); 4222 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); 4223 4224 vcpu_put(vcpu); 4225 return 0; 4226 } 4227 4228 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 4229 { 4230 int ret = 0; 4231 4232 vcpu_load(vcpu); 4233 4234 if (test_fp_ctl(fpu->fpc)) { 4235 ret = -EINVAL; 4236 goto out; 4237 } 4238 vcpu->run->s.regs.fpc = fpu->fpc; 4239 if (MACHINE_HAS_VX) 4240 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, 4241 (freg_t *) fpu->fprs); 4242 else 4243 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); 4244 4245 out: 4246 vcpu_put(vcpu); 4247 return ret; 4248 } 4249 4250 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 4251 { 4252 vcpu_load(vcpu); 4253 4254 /* make sure we have the latest values */ 4255 save_fpu_regs(); 4256 if (MACHINE_HAS_VX) 4257 convert_vx_to_fp((freg_t *) fpu->fprs, 4258 (__vector128 *) vcpu->run->s.regs.vrs); 4259 else 4260 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); 4261 fpu->fpc = vcpu->run->s.regs.fpc; 4262 4263 vcpu_put(vcpu); 4264 return 0; 4265 } 4266 4267 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) 4268 { 4269 int rc = 0; 4270 4271 if (!is_vcpu_stopped(vcpu)) 4272 rc = -EBUSY; 4273 else { 4274 vcpu->run->psw_mask = psw.mask; 4275 vcpu->run->psw_addr = psw.addr; 4276 } 4277 return rc; 4278 } 4279 4280 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 4281 struct kvm_translation *tr) 4282 { 4283 return -EINVAL; /* not implemented yet */ 4284 } 4285 4286 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ 4287 KVM_GUESTDBG_USE_HW_BP | \ 4288 KVM_GUESTDBG_ENABLE) 4289 4290 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 4291 struct kvm_guest_debug *dbg) 4292 { 4293 int rc = 0; 4294 4295 vcpu_load(vcpu); 4296
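	/*
	 * Wipe any previously installed debug state first; if the new
	 * request turns out to be invalid, debugging is left disabled
	 * rather than half-configured.
	 */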
4297 vcpu->guest_debug = 0; 4298 kvm_s390_clear_bp_data(vcpu); 4299 4300 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { 4301 rc = -EINVAL; 4302 goto out; 4303 } 4304 if (!sclp.has_gpere) { 4305 rc = -EINVAL; 4306 goto out; 4307 } 4308 4309 if (dbg->control & KVM_GUESTDBG_ENABLE) { 4310 vcpu->guest_debug = dbg->control; 4311 /* enforce guest PER */ 4312 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P); 4313 4314 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 4315 rc = kvm_s390_import_bp_data(vcpu, dbg); 4316 } else { 4317 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P); 4318 vcpu->arch.guestdbg.last_bp = 0; 4319 } 4320 4321 if (rc) { 4322 vcpu->guest_debug = 0; 4323 kvm_s390_clear_bp_data(vcpu); 4324 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P); 4325 } 4326 4327 out: 4328 vcpu_put(vcpu); 4329 return rc; 4330 } 4331 4332 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 4333 struct kvm_mp_state *mp_state) 4334 { 4335 int ret; 4336 4337 vcpu_load(vcpu); 4338 4339 /* CHECK_STOP and LOAD are not supported yet */ 4340 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : 4341 KVM_MP_STATE_OPERATING; 4342 4343 vcpu_put(vcpu); 4344 return ret; 4345 } 4346 4347 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 4348 struct kvm_mp_state *mp_state) 4349 { 4350 int rc = 0; 4351 4352 vcpu_load(vcpu); 4353 4354 /* user space knows about this interface - let it control the state */ 4355 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); 4356 4357 switch (mp_state->mp_state) { 4358 case KVM_MP_STATE_STOPPED: 4359 rc = kvm_s390_vcpu_stop(vcpu); 4360 break; 4361 case KVM_MP_STATE_OPERATING: 4362 rc = kvm_s390_vcpu_start(vcpu); 4363 break; 4364 case KVM_MP_STATE_LOAD: 4365 if (!kvm_s390_pv_cpu_is_protected(vcpu)) { 4366 rc = -ENXIO; 4367 break; 4368 } 4369 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD); 4370 break; 4371 case KVM_MP_STATE_CHECK_STOP: 4372 fallthrough; /* CHECK_STOP and LOAD are not supported yet */ 4373 default: 4374 rc = -ENXIO; 4375 } 4376 4377 vcpu_put(vcpu); 4378 return rc; 4379 } 4380 4381 static bool ibs_enabled(struct kvm_vcpu *vcpu) 4382 { 4383 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS); 4384 } 4385 4386 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 4387 { 4388 retry: 4389 kvm_s390_vcpu_request_handled(vcpu); 4390 if (!kvm_request_pending(vcpu)) 4391 return 0; 4392 /* 4393 * If the guest prefix changed, re-arm the ipte notifier for the 4394 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock. 4395 * This ensures that the ipte instruction for this request has 4396 * already finished. We might race against a second unmapper that 4397 * wants to set the blocking bit. Let's just retry the request loop.
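 * (Both prefix pages are re-protected in one go below, hence the
 * PAGE_SIZE * 2 length; if gmap_mprotect_notify() fails, the request is
 * simply re-queued so nothing is lost.)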
4398 */ 4399 if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) { 4400 int rc; 4401 rc = gmap_mprotect_notify(vcpu->arch.gmap, 4402 kvm_s390_get_prefix(vcpu), 4403 PAGE_SIZE * 2, PROT_WRITE); 4404 if (rc) { 4405 kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu); 4406 return rc; 4407 } 4408 goto retry; 4409 } 4410 4411 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 4412 vcpu->arch.sie_block->ihcpu = 0xffff; 4413 goto retry; 4414 } 4415 4416 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { 4417 if (!ibs_enabled(vcpu)) { 4418 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); 4419 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS); 4420 } 4421 goto retry; 4422 } 4423 4424 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { 4425 if (ibs_enabled(vcpu)) { 4426 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); 4427 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS); 4428 } 4429 goto retry; 4430 } 4431 4432 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) { 4433 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; 4434 goto retry; 4435 } 4436 4437 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) { 4438 /* 4439 * Disable CMM virtualization; we will emulate the ESSA 4440 * instruction manually, in order to provide additional 4441 * functionalities needed for live migration. 4442 */ 4443 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; 4444 goto retry; 4445 } 4446 4447 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) { 4448 /* 4449 * Re-enable CMM virtualization if CMMA is available and 4450 * CMM has been used. 4451 */ 4452 if ((vcpu->kvm->arch.use_cmma) && 4453 (vcpu->kvm->mm->context.uses_cmm)) 4454 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; 4455 goto retry; 4456 } 4457 4458 /* we left the vsie handler, nothing to do, just clear the request */ 4459 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu); 4460 4461 return 0; 4462 } 4463 4464 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) 4465 { 4466 struct kvm_vcpu *vcpu; 4467 union tod_clock clk; 4468 unsigned long i; 4469 4470 preempt_disable(); 4471 4472 store_tod_clock_ext(&clk); 4473 4474 kvm->arch.epoch = gtod->tod - clk.tod; 4475 kvm->arch.epdx = 0; 4476 if (test_kvm_facility(kvm, 139)) { 4477 kvm->arch.epdx = gtod->epoch_idx - clk.ei; 4478 if (kvm->arch.epoch > gtod->tod) 4479 kvm->arch.epdx -= 1; 4480 } 4481 4482 kvm_s390_vcpu_block_all(kvm); 4483 kvm_for_each_vcpu(i, vcpu, kvm) { 4484 vcpu->arch.sie_block->epoch = kvm->arch.epoch; 4485 vcpu->arch.sie_block->epdx = kvm->arch.epdx; 4486 } 4487 4488 kvm_s390_vcpu_unblock_all(kvm); 4489 preempt_enable(); 4490 } 4491 4492 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) 4493 { 4494 if (!mutex_trylock(&kvm->lock)) 4495 return 0; 4496 __kvm_s390_set_tod_clock(kvm, gtod); 4497 mutex_unlock(&kvm->lock); 4498 return 1; 4499 } 4500 4501 /** 4502 * kvm_arch_fault_in_page - fault-in guest page if necessary 4503 * @vcpu: The corresponding virtual cpu 4504 * @gpa: Guest physical address 4505 * @writable: Whether the page should be writable or not 4506 * 4507 * Make sure that a guest page has been faulted-in on the host. 4508 * 4509 * Return: Zero on success, negative error code otherwise. 4510 */ 4511 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) 4512 { 4513 return gmap_fault(vcpu->arch.gmap, gpa, 4514 writable ? 
FAULT_FLAG_WRITE : 0); 4515 } 4516 4517 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 4518 unsigned long token) 4519 { 4520 struct kvm_s390_interrupt inti; 4521 struct kvm_s390_irq irq; 4522 4523 if (start_token) { 4524 irq.u.ext.ext_params2 = token; 4525 irq.type = KVM_S390_INT_PFAULT_INIT; 4526 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); 4527 } else { 4528 inti.type = KVM_S390_INT_PFAULT_DONE; 4529 inti.parm64 = token; 4530 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 4531 } 4532 } 4533 4534 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 4535 struct kvm_async_pf *work) 4536 { 4537 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); 4538 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); 4539 4540 return true; 4541 } 4542 4543 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 4544 struct kvm_async_pf *work) 4545 { 4546 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); 4547 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); 4548 } 4549 4550 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 4551 struct kvm_async_pf *work) 4552 { 4553 /* s390 will always inject the page directly */ 4554 } 4555 4556 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) 4557 { 4558 /* 4559 * s390 will always inject the page directly, 4560 * but we still want check_async_completion to clean up 4561 */ 4562 return true; 4563 } 4564 4565 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) 4566 { 4567 hva_t hva; 4568 struct kvm_arch_async_pf arch; 4569 4570 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 4571 return false; 4572 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != 4573 vcpu->arch.pfault_compare) 4574 return false; 4575 if (psw_extint_disabled(vcpu)) 4576 return false; 4577 if (kvm_s390_vcpu_has_irq(vcpu, 0)) 4578 return false; 4579 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) 4580 return false; 4581 if (!vcpu->arch.gmap->pfault_enabled) 4582 return false; 4583 4584 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); 4585 hva += current->thread.gmap_addr & ~PAGE_MASK; 4586 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 4587 return false; 4588 4589 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 4590 } 4591 4592 static int vcpu_pre_run(struct kvm_vcpu *vcpu) 4593 { 4594 int rc, cpuflags; 4595 4596 /* 4597 * On s390 notifications for arriving pages will be delivered directly 4598 * to the guest but the housekeeping for completed pfaults is 4599 * handled outside the worker.
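 * (kvm_check_async_pf_completion() below performs that housekeeping
 * from vcpu context.)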
4600 */ 4601 kvm_check_async_pf_completion(vcpu); 4602 4603 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; 4604 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; 4605 4606 if (need_resched()) 4607 schedule(); 4608 4609 if (!kvm_is_ucontrol(vcpu->kvm)) { 4610 rc = kvm_s390_deliver_pending_interrupts(vcpu); 4611 if (rc) 4612 return rc; 4613 } 4614 4615 rc = kvm_s390_handle_requests(vcpu); 4616 if (rc) 4617 return rc; 4618 4619 if (guestdbg_enabled(vcpu)) { 4620 kvm_s390_backup_guest_per_regs(vcpu); 4621 kvm_s390_patch_guest_per_regs(vcpu); 4622 } 4623 4624 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); 4625 4626 vcpu->arch.sie_block->icptcode = 0; 4627 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 4628 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 4629 trace_kvm_s390_sie_enter(vcpu, cpuflags); 4630 4631 return 0; 4632 } 4633 4634 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) 4635 { 4636 struct kvm_s390_pgm_info pgm_info = { 4637 .code = PGM_ADDRESSING, 4638 }; 4639 u8 opcode, ilen; 4640 int rc; 4641 4642 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 4643 trace_kvm_s390_sie_fault(vcpu); 4644 4645 /* 4646 * We want to inject an addressing exception, which is defined as a 4647 * suppressing or terminating exception. However, since we came here 4648 * by a DAT access exception, the PSW still points to the faulting 4649 * instruction since DAT exceptions are nullifying. So we've got 4650 * to look up the current opcode to get the length of the instruction 4651 * to be able to forward the PSW. 4652 */ 4653 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); 4654 ilen = insn_length(opcode); 4655 if (rc < 0) { 4656 return rc; 4657 } else if (rc) { 4658 /* Instruction-Fetching Exceptions - we can't detect the ilen. 4659 * Forward by arbitrary ilc, injection will take care of 4660 * nullification if necessary. 
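 * (If even the opcode fetch fails, an instruction-fetching exception is
 * pending and we fall back to an arbitrary ilen of 4 below.)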
4661 */ 4662 pgm_info = vcpu->arch.pgm; 4663 ilen = 4; 4664 } 4665 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID; 4666 kvm_s390_forward_psw(vcpu, ilen); 4667 return kvm_s390_inject_prog_irq(vcpu, &pgm_info); 4668 } 4669 4670 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 4671 { 4672 struct mcck_volatile_info *mcck_info; 4673 struct sie_page *sie_page; 4674 4675 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 4676 vcpu->arch.sie_block->icptcode); 4677 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 4678 4679 if (guestdbg_enabled(vcpu)) 4680 kvm_s390_restore_guest_per_regs(vcpu); 4681 4682 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; 4683 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; 4684 4685 if (exit_reason == -EINTR) { 4686 VCPU_EVENT(vcpu, 3, "%s", "machine check"); 4687 sie_page = container_of(vcpu->arch.sie_block, 4688 struct sie_page, sie_block); 4689 mcck_info = &sie_page->mcck_info; 4690 kvm_s390_reinject_machine_check(vcpu, mcck_info); 4691 return 0; 4692 } 4693 4694 if (vcpu->arch.sie_block->icptcode > 0) { 4695 int rc = kvm_handle_sie_intercept(vcpu); 4696 4697 if (rc != -EOPNOTSUPP) 4698 return rc; 4699 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; 4700 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 4701 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 4702 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 4703 return -EREMOTE; 4704 } else if (exit_reason != -EFAULT) { 4705 vcpu->stat.exit_null++; 4706 return 0; 4707 } else if (kvm_is_ucontrol(vcpu->kvm)) { 4708 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 4709 vcpu->run->s390_ucontrol.trans_exc_code = 4710 current->thread.gmap_addr; 4711 vcpu->run->s390_ucontrol.pgm_code = 0x10; 4712 return -EREMOTE; 4713 } else if (current->thread.gmap_pfault) { 4714 trace_kvm_s390_major_guest_pfault(vcpu); 4715 current->thread.gmap_pfault = 0; 4716 if (kvm_arch_setup_async_pf(vcpu)) 4717 return 0; 4718 vcpu->stat.pfault_sync++; 4719 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); 4720 } 4721 return vcpu_post_run_fault_in_sie(vcpu); 4722 } 4723 4724 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK) 4725 static int __vcpu_run(struct kvm_vcpu *vcpu) 4726 { 4727 int rc, exit_reason; 4728 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; 4729 4730 /* 4731 * We try to hold kvm->srcu during most of vcpu_run (except when 4732 * running the guest), so that memslots (and other stuff) are protected 4733 */ 4734 kvm_vcpu_srcu_read_lock(vcpu); 4735 4736 do { 4737 rc = vcpu_pre_run(vcpu); 4738 if (rc) 4739 break; 4740 4741 kvm_vcpu_srcu_read_unlock(vcpu); 4742 /* 4743 * As PF_VCPU will be used in the fault handler, there must be 4744 * no uaccess between guest_enter and guest_exit. 4745 */ 4746 local_irq_disable(); 4747 guest_enter_irqoff(); 4748 __disable_cpu_timer_accounting(vcpu); 4749 local_irq_enable(); 4750 if (kvm_s390_pv_cpu_is_protected(vcpu)) { 4751 memcpy(sie_page->pv_grregs, 4752 vcpu->run->s.regs.gprs, 4753 sizeof(sie_page->pv_grregs)); 4754 } 4755 if (test_cpu_flag(CIF_FPU)) 4756 load_fpu_regs(); 4757 exit_reason = sie64a(vcpu->arch.sie_block, 4758 vcpu->run->s.regs.gprs); 4759 if (kvm_s390_pv_cpu_is_protected(vcpu)) { 4760 memcpy(vcpu->run->s.regs.gprs, 4761 sie_page->pv_grregs, 4762 sizeof(sie_page->pv_grregs)); 4763 /* 4764 * We're not allowed to inject interrupts on intercepts 4765 * that leave the guest state in an "in-between" state 4766 * where the next SIE entry will do a continuation.
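 * This concerns the ICPT_PV_INSTR and ICPT_PV_PREF intercepts checked
 * right below.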
4767 * Fence interrupts in our "internal" PSW. 4768 */ 4769 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || 4770 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { 4771 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; 4772 } 4773 } 4774 local_irq_disable(); 4775 __enable_cpu_timer_accounting(vcpu); 4776 guest_exit_irqoff(); 4777 local_irq_enable(); 4778 kvm_vcpu_srcu_read_lock(vcpu); 4779 4780 rc = vcpu_post_run(vcpu, exit_reason); 4781 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 4782 4783 kvm_vcpu_srcu_read_unlock(vcpu); 4784 return rc; 4785 } 4786 4787 static void sync_regs_fmt2(struct kvm_vcpu *vcpu) 4788 { 4789 struct kvm_run *kvm_run = vcpu->run; 4790 struct runtime_instr_cb *riccb; 4791 struct gs_cb *gscb; 4792 4793 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; 4794 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; 4795 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 4796 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 4797 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 4798 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; 4799 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; 4800 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; 4801 } 4802 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { 4803 vcpu->arch.pfault_token = kvm_run->s.regs.pft; 4804 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; 4805 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; 4806 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 4807 kvm_clear_async_pf_completion_queue(vcpu); 4808 } 4809 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { 4810 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; 4811 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; 4812 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); 4813 } 4814 /* 4815 * If userspace sets the riccb (e.g. after migration) to a valid state, 4816 * we should enable RI here instead of doing the lazy enablement. 4817 */ 4818 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && 4819 test_kvm_facility(vcpu->kvm, 64) && 4820 riccb->v && 4821 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { 4822 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)"); 4823 vcpu->arch.sie_block->ecb3 |= ECB3_RI; 4824 } 4825 /* 4826 * If userspace sets the gscb (e.g. after migration) to non-zero, 4827 * we should enable GS here instead of doing the lazy enablement. 4828 */ 4829 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && 4830 test_kvm_facility(vcpu->kvm, 133) && 4831 gscb->gssm && 4832 !vcpu->arch.gs_enabled) { 4833 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)"); 4834 vcpu->arch.sie_block->ecb |= ECB_GS; 4835 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 4836 vcpu->arch.gs_enabled = 1; 4837 } 4838 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && 4839 test_kvm_facility(vcpu->kvm, 82)) { 4840 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; 4841 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? 
FPF_BPBC : 0; 4842 } 4843 if (MACHINE_HAS_GS) { 4844 preempt_disable(); 4845 __ctl_set_bit(2, 4); 4846 if (current->thread.gs_cb) { 4847 vcpu->arch.host_gscb = current->thread.gs_cb; 4848 save_gs_cb(vcpu->arch.host_gscb); 4849 } 4850 if (vcpu->arch.gs_enabled) { 4851 current->thread.gs_cb = (struct gs_cb *) 4852 &vcpu->run->s.regs.gscb; 4853 restore_gs_cb(current->thread.gs_cb); 4854 } 4855 preempt_enable(); 4856 } 4857 /* SIE will load etoken directly from SDNX and therefore kvm_run */ 4858 } 4859 4860 static void sync_regs(struct kvm_vcpu *vcpu) 4861 { 4862 struct kvm_run *kvm_run = vcpu->run; 4863 4864 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) 4865 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 4866 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 4867 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 4868 /* some control register changes require a tlb flush */ 4869 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 4870 } 4871 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 4872 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); 4873 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; 4874 } 4875 save_access_regs(vcpu->arch.host_acrs); 4876 restore_access_regs(vcpu->run->s.regs.acrs); 4877 /* save host (userspace) fprs/vrs */ 4878 save_fpu_regs(); 4879 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; 4880 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; 4881 if (MACHINE_HAS_VX) 4882 current->thread.fpu.regs = vcpu->run->s.regs.vrs; 4883 else 4884 current->thread.fpu.regs = vcpu->run->s.regs.fprs; 4885 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; 4886 if (test_fp_ctl(current->thread.fpu.fpc)) 4887 /* User space provided an invalid FPC, let's clear it */ 4888 current->thread.fpu.fpc = 0; 4889 4890 /* Sync fmt2 only data */ 4891 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) { 4892 sync_regs_fmt2(vcpu); 4893 } else { 4894 /* 4895 * In several places we have to modify our internal view to 4896 * not do things that are disallowed by the ultravisor. For 4897 * example we must not inject interrupts after specific exits 4898 * (e.g. 112 prefix page not secure). We do this by turning 4899 * off the machine check, external and I/O interrupt bits 4900 * of our PSW copy. To avoid getting validity intercepts, we 4901 * only accept the condition code from userspace.
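 * (PSW_MASK_CC is therefore the only part of the PSW taken over from
 * kvm_run below.)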
4902 */ 4903 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; 4904 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & 4905 PSW_MASK_CC; 4906 } 4907 4908 kvm_run->kvm_dirty_regs = 0; 4909 } 4910 4911 static void store_regs_fmt2(struct kvm_vcpu *vcpu) 4912 { 4913 struct kvm_run *kvm_run = vcpu->run; 4914 4915 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; 4916 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; 4917 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 4918 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; 4919 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; 4920 if (MACHINE_HAS_GS) { 4921 preempt_disable(); 4922 __ctl_set_bit(2, 4); 4923 if (vcpu->arch.gs_enabled) 4924 save_gs_cb(current->thread.gs_cb); 4925 current->thread.gs_cb = vcpu->arch.host_gscb; 4926 restore_gs_cb(vcpu->arch.host_gscb); 4927 if (!vcpu->arch.host_gscb) 4928 __ctl_clear_bit(2, 4); 4929 vcpu->arch.host_gscb = NULL; 4930 preempt_enable(); 4931 } 4932 /* SIE will save etoken directly into SDNX and therefore kvm_run */ 4933 } 4934 4935 static void store_regs(struct kvm_vcpu *vcpu) 4936 { 4937 struct kvm_run *kvm_run = vcpu->run; 4938 4939 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 4940 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 4941 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 4942 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 4943 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); 4944 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; 4945 kvm_run->s.regs.pft = vcpu->arch.pfault_token; 4946 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 4947 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 4948 save_access_regs(vcpu->run->s.regs.acrs); 4949 restore_access_regs(vcpu->arch.host_acrs); 4950 /* Save guest register state */ 4951 save_fpu_regs(); 4952 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; 4953 /* Restore will be done lazily at return */ 4954 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; 4955 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; 4956 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) 4957 store_regs_fmt2(vcpu); 4958 } 4959 4960 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 4961 { 4962 struct kvm_run *kvm_run = vcpu->run; 4963 int rc; 4964 4965 /* 4966 * Running a VM while dumping always has the potential to 4967 * produce inconsistent dump data. But for PV vcpus a SIE 4968 * entry while dumping could also lead to a fatal validity 4969 * intercept which we absolutely want to avoid. 
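 * Hence the hard -EINVAL below for as long as kvm->arch.pv.dumping is set.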
4970 */ 4971 if (vcpu->kvm->arch.pv.dumping) 4972 return -EINVAL; 4973 4974 if (kvm_run->immediate_exit) 4975 return -EINTR; 4976 4977 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || 4978 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) 4979 return -EINVAL; 4980 4981 vcpu_load(vcpu); 4982 4983 if (guestdbg_exit_pending(vcpu)) { 4984 kvm_s390_prepare_debug_exit(vcpu); 4985 rc = 0; 4986 goto out; 4987 } 4988 4989 kvm_sigset_activate(vcpu); 4990 4991 /* 4992 * no need to check the return value of vcpu_start as it can only have 4993 * an error for protvirt, but protvirt means user cpu state 4994 */ 4995 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 4996 kvm_s390_vcpu_start(vcpu); 4997 } else if (is_vcpu_stopped(vcpu)) { 4998 pr_err_ratelimited("can't run stopped vcpu %d\n", 4999 vcpu->vcpu_id); 5000 rc = -EINVAL; 5001 goto out; 5002 } 5003 5004 sync_regs(vcpu); 5005 enable_cpu_timer_accounting(vcpu); 5006 5007 might_fault(); 5008 rc = __vcpu_run(vcpu); 5009 5010 if (signal_pending(current) && !rc) { 5011 kvm_run->exit_reason = KVM_EXIT_INTR; 5012 rc = -EINTR; 5013 } 5014 5015 if (guestdbg_exit_pending(vcpu) && !rc) { 5016 kvm_s390_prepare_debug_exit(vcpu); 5017 rc = 0; 5018 } 5019 5020 if (rc == -EREMOTE) { 5021 /* userspace support is needed, kvm_run has been prepared */ 5022 rc = 0; 5023 } 5024 5025 disable_cpu_timer_accounting(vcpu); 5026 store_regs(vcpu); 5027 5028 kvm_sigset_deactivate(vcpu); 5029 5030 vcpu->stat.exit_userspace++; 5031 out: 5032 vcpu_put(vcpu); 5033 return rc; 5034 } 5035 5036 /* 5037 * store status at address 5038 * we handle two special cases: 5039 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 5040 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 5041 */ 5042 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 5043 { 5044 unsigned char archmode = 1; 5045 freg_t fprs[NUM_FPRS]; 5046 unsigned int px; 5047 u64 clkcomp, cputm; 5048 int rc; 5049 5050 px = kvm_s390_get_prefix(vcpu); 5051 if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 5052 if (write_guest_abs(vcpu, 163, &archmode, 1)) 5053 return -EFAULT; 5054 gpa = 0; 5055 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 5056 if (write_guest_real(vcpu, 163, &archmode, 1)) 5057 return -EFAULT; 5058 gpa = px; 5059 } else 5060 gpa -= __LC_FPREGS_SAVE_AREA; 5061 5062 /* manually convert vector registers if necessary */ 5063 if (MACHINE_HAS_VX) { 5064 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); 5065 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, 5066 fprs, 128); 5067 } else { 5068 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, 5069 vcpu->run->s.regs.fprs, 128); 5070 } 5071 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, 5072 vcpu->run->s.regs.gprs, 128); 5073 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA, 5074 &vcpu->arch.sie_block->gpsw, 16); 5075 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA, 5076 &px, 4); 5077 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA, 5078 &vcpu->run->s.regs.fpc, 4); 5079 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, 5080 &vcpu->arch.sie_block->todpr, 4); 5081 cputm = kvm_s390_get_cpu_timer(vcpu); 5082 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, 5083 &cputm, 8); 5084 clkcomp = vcpu->arch.sie_block->ckc >> 8; 5085 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA, 5086 &clkcomp, 8); 5087 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA, 5088 &vcpu->run->s.regs.acrs, 64); 5089 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA, 5090
&vcpu->arch.sie_block->gcr, 128); 5091 return rc ? -EFAULT : 0; 5092 } 5093 5094 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 5095 { 5096 /* 5097 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 5098 * switch in the run ioctl. Let's update our copies before we save 5099 * it into the save area 5100 */ 5101 save_fpu_regs(); 5102 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; 5103 save_access_regs(vcpu->run->s.regs.acrs); 5104 5105 return kvm_s390_store_status_unloaded(vcpu, addr); 5106 } 5107 5108 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 5109 { 5110 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 5111 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu); 5112 } 5113 5114 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 5115 { 5116 unsigned long i; 5117 struct kvm_vcpu *vcpu; 5118 5119 kvm_for_each_vcpu(i, vcpu, kvm) { 5120 __disable_ibs_on_vcpu(vcpu); 5121 } 5122 } 5123 5124 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 5125 { 5126 if (!sclp.has_ibs) 5127 return; 5128 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 5129 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu); 5130 } 5131 5132 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 5133 { 5134 int i, online_vcpus, r = 0, started_vcpus = 0; 5135 5136 if (!is_vcpu_stopped(vcpu)) 5137 return 0; 5138 5139 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 5140 /* Only one cpu at a time may enter/leave the STOPPED state. */ 5141 spin_lock(&vcpu->kvm->arch.start_stop_lock); 5142 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 5143 5144 /* Let's tell the UV that we want to change into the operating state */ 5145 if (kvm_s390_pv_cpu_is_protected(vcpu)) { 5146 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR); 5147 if (r) { 5148 spin_unlock(&vcpu->kvm->arch.start_stop_lock); 5149 return r; 5150 } 5151 } 5152 5153 for (i = 0; i < online_vcpus; i++) { 5154 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) 5155 started_vcpus++; 5156 } 5157 5158 if (started_vcpus == 0) { 5159 /* we're the only active VCPU -> speed it up */ 5160 __enable_ibs_on_vcpu(vcpu); 5161 } else if (started_vcpus == 1) { 5162 /* 5163 * As we are starting a second VCPU, we have to disable 5164 * the IBS facility on all VCPUs to remove potentially 5165 * outstanding ENABLE requests. 5166 */ 5167 __disable_ibs_on_all_vcpus(vcpu->kvm); 5168 } 5169 5170 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED); 5171 /* 5172 * The real PSW might have changed due to a RESTART interpreted by the 5173 * ultravisor. We block all interrupts and let the next sie exit 5174 * refresh our view. 5175 */ 5176 if (kvm_s390_pv_cpu_is_protected(vcpu)) 5177 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; 5178 /* 5179 * Another VCPU might have used IBS while we were offline. 5180 * Let's play safe and flush the VCPU at startup. 5181 */ 5182 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 5183 spin_unlock(&vcpu->kvm->arch.start_stop_lock); 5184 return 0; 5185 } 5186 5187 int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 5188 { 5189 int i, online_vcpus, r = 0, started_vcpus = 0; 5190 struct kvm_vcpu *started_vcpu = NULL; 5191 5192 if (is_vcpu_stopped(vcpu)) 5193 return 0; 5194 5195 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 5196 /* Only one cpu at a time may enter/leave the STOPPED state. 
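 * This is serialized by the per-VM start_stop_lock taken right below.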
*/ 5197 spin_lock(&vcpu->kvm->arch.start_stop_lock); 5198 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 5199 5200 /* Let's tell the UV that we want to change into the stopped state */ 5201 if (kvm_s390_pv_cpu_is_protected(vcpu)) { 5202 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP); 5203 if (r) { 5204 spin_unlock(&vcpu->kvm->arch.start_stop_lock); 5205 return r; 5206 } 5207 } 5208 5209 /* 5210 * Set the VCPU to STOPPED and THEN clear the interrupt flag, 5211 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders 5212 * have been fully processed. This will ensure that the VCPU 5213 * is kept BUSY if another VCPU is inquiring with SIGP SENSE. 5214 */ 5215 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); 5216 kvm_s390_clear_stop_irq(vcpu); 5217 5218 __disable_ibs_on_vcpu(vcpu); 5219 5220 for (i = 0; i < online_vcpus; i++) { 5221 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); 5222 5223 if (!is_vcpu_stopped(tmp)) { 5224 started_vcpus++; 5225 started_vcpu = tmp; 5226 } 5227 } 5228 5229 if (started_vcpus == 1) { 5230 /* 5231 * As we only have one VCPU left, we want to enable the 5232 * IBS facility for that VCPU to speed it up. 5233 */ 5234 __enable_ibs_on_vcpu(started_vcpu); 5235 } 5236 5237 spin_unlock(&vcpu->kvm->arch.start_stop_lock); 5238 return 0; 5239 } 5240 5241 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5242 struct kvm_enable_cap *cap) 5243 { 5244 int r; 5245 5246 if (cap->flags) 5247 return -EINVAL; 5248 5249 switch (cap->cap) { 5250 case KVM_CAP_S390_CSS_SUPPORT: 5251 if (!vcpu->kvm->arch.css_support) { 5252 vcpu->kvm->arch.css_support = 1; 5253 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); 5254 trace_kvm_s390_enable_css(vcpu->kvm); 5255 } 5256 r = 0; 5257 break; 5258 default: 5259 r = -EINVAL; 5260 break; 5261 } 5262 return r; 5263 } 5264 5265 static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu, 5266 struct kvm_s390_mem_op *mop) 5267 { 5268 void __user *uaddr = (void __user *)mop->buf; 5269 void *sida_addr; 5270 int r = 0; 5271 5272 if (mop->flags || !mop->size) 5273 return -EINVAL; 5274 if (mop->size + mop->sida_offset < mop->size) 5275 return -EINVAL; 5276 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) 5277 return -E2BIG; 5278 if (!kvm_s390_pv_cpu_is_protected(vcpu)) 5279 return -EINVAL; 5280 5281 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; 5282 5283 switch (mop->op) { 5284 case KVM_S390_MEMOP_SIDA_READ: 5285 if (copy_to_user(uaddr, sida_addr, mop->size)) 5286 r = -EFAULT; 5287 5288 break; 5289 case KVM_S390_MEMOP_SIDA_WRITE: 5290 if (copy_from_user(sida_addr, uaddr, mop->size)) 5291 r = -EFAULT; 5292 break; 5293 } 5294 return r; 5295 } 5296 5297 static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu, 5298 struct kvm_s390_mem_op *mop) 5299 { 5300 void __user *uaddr = (void __user *)mop->buf; 5301 enum gacc_mode acc_mode; 5302 void *tmpbuf = NULL; 5303 int r; 5304 5305 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION | 5306 KVM_S390_MEMOP_F_CHECK_ONLY | 5307 KVM_S390_MEMOP_F_SKEY_PROTECTION); 5308 if (r) 5309 return r; 5310 if (mop->ar >= NUM_ACRS) 5311 return -EINVAL; 5312 if (kvm_s390_pv_cpu_is_protected(vcpu)) 5313 return -EINVAL; 5314 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { 5315 tmpbuf = vmalloc(mop->size); 5316 if (!tmpbuf) 5317 return -ENOMEM; 5318 } 5319 5320 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? 
GACC_FETCH : GACC_STORE; 5321 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { 5322 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, 5323 acc_mode, mop->key); 5324 goto out_inject; 5325 } 5326 if (acc_mode == GACC_FETCH) { 5327 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, 5328 mop->size, mop->key); 5329 if (r) 5330 goto out_inject; 5331 if (copy_to_user(uaddr, tmpbuf, mop->size)) { 5332 r = -EFAULT; 5333 goto out_free; 5334 } 5335 } else { 5336 if (copy_from_user(tmpbuf, uaddr, mop->size)) { 5337 r = -EFAULT; 5338 goto out_free; 5339 } 5340 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, 5341 mop->size, mop->key); 5342 } 5343 5344 out_inject: 5345 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) 5346 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 5347 5348 out_free: 5349 vfree(tmpbuf); 5350 return r; 5351 } 5352 5353 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu, 5354 struct kvm_s390_mem_op *mop) 5355 { 5356 int r, srcu_idx; 5357 5358 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 5359 5360 switch (mop->op) { 5361 case KVM_S390_MEMOP_LOGICAL_READ: 5362 case KVM_S390_MEMOP_LOGICAL_WRITE: 5363 r = kvm_s390_vcpu_mem_op(vcpu, mop); 5364 break; 5365 case KVM_S390_MEMOP_SIDA_READ: 5366 case KVM_S390_MEMOP_SIDA_WRITE: 5367 /* we are locked against sida going away by the vcpu->mutex */ 5368 r = kvm_s390_vcpu_sida_op(vcpu, mop); 5369 break; 5370 default: 5371 r = -EINVAL; 5372 } 5373 5374 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); 5375 return r; 5376 } 5377 5378 long kvm_arch_vcpu_async_ioctl(struct file *filp, 5379 unsigned int ioctl, unsigned long arg) 5380 { 5381 struct kvm_vcpu *vcpu = filp->private_data; 5382 void __user *argp = (void __user *)arg; 5383 5384 switch (ioctl) { 5385 case KVM_S390_IRQ: { 5386 struct kvm_s390_irq s390irq; 5387 5388 if (copy_from_user(&s390irq, argp, sizeof(s390irq))) 5389 return -EFAULT; 5390 return kvm_s390_inject_vcpu(vcpu, &s390irq); 5391 } 5392 case KVM_S390_INTERRUPT: { 5393 struct kvm_s390_interrupt s390int; 5394 struct kvm_s390_irq s390irq = {}; 5395 5396 if (copy_from_user(&s390int, argp, sizeof(s390int))) 5397 return -EFAULT; 5398 if (s390int_to_s390irq(&s390int, &s390irq)) 5399 return -EINVAL; 5400 return kvm_s390_inject_vcpu(vcpu, &s390irq); 5401 } 5402 } 5403 return -ENOIOCTLCMD; 5404 } 5405 5406 static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu, 5407 struct kvm_pv_cmd *cmd) 5408 { 5409 struct kvm_s390_pv_dmp dmp; 5410 void *data; 5411 int ret; 5412 5413 /* Dump initialization is a prerequisite */ 5414 if (!vcpu->kvm->arch.pv.dumping) 5415 return -EINVAL; 5416 5417 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp))) 5418 return -EFAULT; 5419 5420 /* We only handle this subcmd right now */ 5421 if (dmp.subcmd != KVM_PV_DUMP_CPU) 5422 return -EINVAL; 5423 5424 /* CPU dump length is the same as create cpu storage donation. 
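 * That length is uv_info.guest_cpu_stor_len; the user-supplied buff_len
 * is checked against it below.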
*/ 5425 if (dmp.buff_len != uv_info.guest_cpu_stor_len) 5426 return -EINVAL; 5427 5428 data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL); 5429 if (!data) 5430 return -ENOMEM; 5431 5432 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc); 5433 5434 VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x", 5435 vcpu->vcpu_id, cmd->rc, cmd->rrc); 5436 5437 if (ret) 5438 ret = -EINVAL; 5439 5440 /* On success copy over the dump data */ 5441 if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len)) 5442 ret = -EFAULT; 5443 5444 kvfree(data); 5445 return ret; 5446 } 5447 5448 long kvm_arch_vcpu_ioctl(struct file *filp, 5449 unsigned int ioctl, unsigned long arg) 5450 { 5451 struct kvm_vcpu *vcpu = filp->private_data; 5452 void __user *argp = (void __user *)arg; 5453 int idx; 5454 long r; 5455 u16 rc, rrc; 5456 5457 vcpu_load(vcpu); 5458 5459 switch (ioctl) { 5460 case KVM_S390_STORE_STATUS: 5461 idx = srcu_read_lock(&vcpu->kvm->srcu); 5462 r = kvm_s390_store_status_unloaded(vcpu, arg); 5463 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5464 break; 5465 case KVM_S390_SET_INITIAL_PSW: { 5466 psw_t psw; 5467 5468 r = -EFAULT; 5469 if (copy_from_user(&psw, argp, sizeof(psw))) 5470 break; 5471 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 5472 break; 5473 } 5474 case KVM_S390_CLEAR_RESET: 5475 r = 0; 5476 kvm_arch_vcpu_ioctl_clear_reset(vcpu); 5477 if (kvm_s390_pv_cpu_is_protected(vcpu)) { 5478 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), 5479 UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc); 5480 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x", 5481 rc, rrc); 5482 } 5483 break; 5484 case KVM_S390_INITIAL_RESET: 5485 r = 0; 5486 kvm_arch_vcpu_ioctl_initial_reset(vcpu); 5487 if (kvm_s390_pv_cpu_is_protected(vcpu)) { 5488 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), 5489 UVC_CMD_CPU_RESET_INITIAL, 5490 &rc, &rrc); 5491 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x", 5492 rc, rrc); 5493 } 5494 break; 5495 case KVM_S390_NORMAL_RESET: 5496 r = 0; 5497 kvm_arch_vcpu_ioctl_normal_reset(vcpu); 5498 if (kvm_s390_pv_cpu_is_protected(vcpu)) { 5499 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), 5500 UVC_CMD_CPU_RESET, &rc, &rrc); 5501 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x", 5502 rc, rrc); 5503 } 5504 break; 5505 case KVM_SET_ONE_REG: 5506 case KVM_GET_ONE_REG: { 5507 struct kvm_one_reg reg; 5508 r = -EINVAL; 5509 if (kvm_s390_pv_cpu_is_protected(vcpu)) 5510 break; 5511 r = -EFAULT; 5512 if (copy_from_user(&reg, argp, sizeof(reg))) 5513 break; 5514 if (ioctl == KVM_SET_ONE_REG) 5515 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg); 5516 else 5517 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg); 5518 break; 5519 } 5520 #ifdef CONFIG_KVM_S390_UCONTROL 5521 case KVM_S390_UCAS_MAP: { 5522 struct kvm_s390_ucas_mapping ucasmap; 5523 5524 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 5525 r = -EFAULT; 5526 break; 5527 } 5528 5529 if (!kvm_is_ucontrol(vcpu->kvm)) { 5530 r = -EINVAL; 5531 break; 5532 } 5533 5534 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 5535 ucasmap.vcpu_addr, ucasmap.length); 5536 break; 5537 } 5538 case KVM_S390_UCAS_UNMAP: { 5539 struct kvm_s390_ucas_mapping ucasmap; 5540 5541 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 5542 r = -EFAULT; 5543 break; 5544 } 5545 5546 if (!kvm_is_ucontrol(vcpu->kvm)) { 5547 r = -EINVAL; 5548 break; 5549 } 5550 5551 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 5552 ucasmap.length); 5553 break; 5554 } 5555 #endif 5556 case
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_PV_CPU_COMMAND: {
		struct kvm_pv_cmd cmd;

		r = -EINVAL;
		if (!is_prot_virt_host())
			break;

		r = -EFAULT;
		if (copy_from_user(&cmd, argp, sizeof(cmd)))
			break;

		r = -EINVAL;
		if (cmd.flags)
			break;

		/* We only handle this cmd right now */
		if (cmd.cmd != KVM_PV_DUMP)
			break;

		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);

		/* Always copy over UV rc / rrc data */
		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
			r = -EFAULT;
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return true;
}
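/*
 * Illustrative sketch only: kvm_arch_vcpu_fault() above backs mmap() on the
 * vcpu fd for user-controlled (ucontrol) VMs, exposing the SIE control
 * block. A hypothetical caller, with "page_size" from sysconf(_SC_PAGESIZE):
 *
 *	void *sie_block = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, vcpu_fd,
 *			       KVM_S390_SIE_PAGE_OFFSET * page_size);
 *
 *	if (sie_block == MAP_FAILED)
 *		perror("mmap sie block");
 */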
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	gpa_t size;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
		/*
		 * A few sanity checks: memory slots must start and end on a
		 * segment boundary (1 MB). The userland memory backing a slot
		 * may be fragmented across multiple vmas, and it is fine to
		 * mmap() and munmap() within the slot at any time after this
		 * call.
		 */

		if (new->userspace_addr & 0xffffful)
			return -EINVAL;

		size = new->npages * PAGE_SIZE;
		if (size & 0xffffful)
			return -EINVAL;

		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
			return -EINVAL;
	}

	if (!kvm->arch.migration_mode)
		return 0;

	/*
	 * Turn off migration mode when:
	 * - userspace creates a new memslot with dirty logging off,
	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
	 *   dirty logging is turned off.
	 * Migration mode requires dirty page logging to be enabled in order
	 * to store its dirty bitmap.
	 */
	if (change != KVM_MR_DELETE &&
	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		WARN(kvm_s390_vm_stop_migration(kvm),
		     "Failed to stop migration mode");

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

static int __init kvm_s390_init(void)
{
	int i, r;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	r = __kvm_s390_init();
	if (r)
		return r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r) {
		__kvm_s390_exit();
		return r;
	}
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();

	__kvm_s390_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
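/*
 * Illustrative note: given the aliases above, merely opening the misc device
 * node is enough to autoload this module. A minimal userspace probe might
 * look like this (KVM_GET_API_VERSION returns the stable API version, 12):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *
 *	if (kvm_fd >= 0 && ioctl(kvm_fd, KVM_GET_API_VERSION, 0) == 12)
 *		puts("kvm-s390 loaded and usable");
 */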