// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	/* calculated guest addresses of satellite control blocks */
	gpa_t sca_gpa;				/* 0x0230 */
	gpa_t itdba_gpa;			/* 0x0238 */
	gpa_t gvrd_gpa;				/* 0x0240 */
	gpa_t riccbd_gpa;			/* 0x0248 */
	gpa_t sdnx_gpa;				/* 0x0250 */
	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};

/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}
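
/*
 * Note on the mapped/unmapped handshake above: PROG_REQUEST in prog20
 * blocks the VSIE from being (re-)entered while the shadow prefix is not
 * mapped. prefix_unmapped_sync() additionally requests a STOP intercept
 * and spins on PROG_IN_SIE (prog0c) until a currently running VSIE has
 * left SIE.
 */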

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}

/*
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We only accept format-1 (no AP in g2), but convert it into format-2
 * There is nothing to do for format-0.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;

	scb_s->crycbd = 0;
	if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
		return 0;
	/* format-1 is supported with message-security-assist extension 3 */
	if (!test_kvm_facility(vcpu->kvm, 76))
		return 0;
	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	if (!ecb3_flags)
		return 0;

	if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
		return set_validity_icpt(scb_s, 0x003CU);
	else if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
			CRYCB_FORMAT2;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
			vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
	return 0;
}

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}
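
/*
 * The net effect of prepare_ibc() above is (illustrative, not actual code):
 *	scb_s->ibc = clamp(new_ibc, min_ibc, vcpu->kvm->arch.model.ibc);
 * applied only when g2 has an ibc configured and g3 requested one.
 */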

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	case ICPT_PARTEXEC:
		/* MVPG only */
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We have to definitely flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139))
		scb_s->ecd |= scb_o->ecd & ECD_MEF;

	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}
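
/*
 * map_prefix() above and kvm_s390_vsie_gmap_notifier() cooperate: the
 * mapped prefix pages are protected in the shadow gmap, and as soon as
 * the notifier sees an unmap of that range it marks the prefix unmapped
 * again, blocking the VSIE until map_prefix() has been run once more.
 */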

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}

/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}
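
/*
 * Both helpers above operate on exactly one page. Callers such as
 * pin_blocks() therefore have to make sure (or check) that a pinned block
 * never crosses a page boundary, see the various validity checks below.
 */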

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
		vsie_page->sca_gpa = 0;
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
		vsie_page->itdba_gpa = 0;
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
		vsie_page->gvrd_gpa = 0;
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
		vsie_page->riccbd_gpa = 0;
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
		vsie_page->sdnx_gpa = 0;
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (gpa < 2 * PAGE_SIZE)
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		vsie_page->sca_gpa = gpa;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		vsie_page->itdba_gpa = gpa;
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 byte vector registers cannot cross page boundaries;
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		vsie_page->gvrd_gpa = gpa;
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		vsie_page->riccbd_gpa = gpa;
		scb_s->riccbd = hpa;
	}
	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/* Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		vsie_page->sdnx_gpa = gpa;
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}

/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}
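
/*
 * Worked example for the trans_exc_code constructed in inject_fault()
 * above (values derived from the code itself): for write_flag == true
 * the access-type term evaluates to (0 + 1) << 10 = 0x400, for
 * write_flag == false to (1 + 1) << 10 = 0x800, i.e. bits 52-53
 * distinguish store from fetch accesses; 0x4 is or'ed in additionally
 * for protection exceptions.
 */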

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}
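
/*
 * Note for the fac address set above and the crycbd set in shadow_crycb():
 * both scb fields only hold 31-bit addresses. The vsie_page is allocated
 * with GFP_DMA in get_vsie_page(), which on s390 places it below 2 GB, so
 * the addresses of the embedded fac list and crycb fit into these fields.
 */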

/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	__releases(vcpu->kvm->srcu)
	__acquires(vcpu->kvm->srcu)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc;

	handle_last_fault(vcpu, vsie_page);

	if (need_resched())
		schedule();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	rc = sie64a(scb_s, vcpu->run->s.regs.gprs);

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	}
	return rc;
}

static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}

/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0))
			break;
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
	}
	return rc;
}
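
/*
 * The vsie pages below are cached per VM: the radix tree maps the scb
 * address (512 byte aligned, hence the ">> 9") to the backing page, and
 * the page reference count doubles as an "in use" marker - a refcount of
 * 1 means cached but free, a successful increment to 2 means the page now
 * belongs to the calling VCPU.
 */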

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}

/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}