// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

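/* Handle STPX (STORE PREFIX) interception */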
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_skey_related_inst(vcpu);
	/* Already enabled? */
	if (vcpu->arch.skey_enabled)
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	if (!vcpu->kvm->arch.use_skf)
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	else
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	vcpu->arch.skey_enabled = true;
	return 0;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (vcpu->kvm->arch.use_skf) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long gaddr, vmaddr;
	unsigned char key;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

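	/*
	 * The second operand register holds the guest logical address of the
	 * page; convert it to a guest absolute address and then to the host
	 * virtual address that backs it.
	 */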
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, vmaddr, &key);

	if (rc) {
		rc = fixup_user_fault(current, current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			up_read(&current->mm->mmap_sem);
			goto retry;
		}
	}
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	up_read(&current->mm->mmap_sem);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr, gaddr;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, vmaddr);
	if (rc < 0) {
		rc = fixup_user_fault(current, current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			up_read(&current->mm->mmap_sem);
			goto retry;
		}
	}
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	up_read(&current->mm->mmap_sem);
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

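	/*
	 * Drop m3 bits that the guest may not use: the multiple-block control
	 * without facility 8, the conditional-SSKE controls without facility
	 * 10 and the non-quiescing control without facility 14.
	 */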
	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		unlocked = false;

		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);

		if (rc < 0) {
			rc = fixup_user_fault(current, current->mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			rc = !rc ? -EAGAIN : rc;
		}
		if (rc == -EFAULT)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		up_read(&current->mm->mmap_sem);
		if (rc >= 0)
			start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

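/* Handle STIDP (STORE CPU ID) interception */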
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
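	/* Mask the address according to the PSW addressing mode (24/31/64 bit) */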
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				rc = fixup_user_fault(current, current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

			up_read(&current->mm->mmap_sem);
			if (rc >= 0)
				start += PAGE_SIZE;
		}
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.uses_cmm = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		down_read(&vcpu->kvm->mm->mmap_sem);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		up_read(&vcpu->kvm->mm->mmap_sem);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

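/* Handle LCTLG (LOAD CONTROL 64 bit) interception */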
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}