1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * handling privileged instructions 4 * 5 * Copyright IBM Corp. 2008, 2018 6 * 7 * Author(s): Carsten Otte <cotte@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com> 9 */ 10 11 #include <linux/kvm.h> 12 #include <linux/gfp.h> 13 #include <linux/errno.h> 14 #include <linux/compat.h> 15 #include <linux/mm_types.h> 16 17 #include <asm/asm-offsets.h> 18 #include <asm/facility.h> 19 #include <asm/current.h> 20 #include <asm/debug.h> 21 #include <asm/ebcdic.h> 22 #include <asm/sysinfo.h> 23 #include <asm/pgtable.h> 24 #include <asm/page-states.h> 25 #include <asm/pgalloc.h> 26 #include <asm/gmap.h> 27 #include <asm/io.h> 28 #include <asm/ptrace.h> 29 #include <asm/sclp.h> 30 #include "gaccess.h" 31 #include "kvm-s390.h" 32 #include "trace.h" 33 34 static int handle_ri(struct kvm_vcpu *vcpu) 35 { 36 vcpu->stat.instruction_ri++; 37 38 if (test_kvm_facility(vcpu->kvm, 64)) { 39 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); 40 vcpu->arch.sie_block->ecb3 |= ECB3_RI; 41 kvm_s390_retry_instr(vcpu); 42 return 0; 43 } else 44 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 45 } 46 47 int kvm_s390_handle_aa(struct kvm_vcpu *vcpu) 48 { 49 if ((vcpu->arch.sie_block->ipa & 0xf) <= 4) 50 return handle_ri(vcpu); 51 else 52 return -EOPNOTSUPP; 53 } 54 55 static int handle_gs(struct kvm_vcpu *vcpu) 56 { 57 vcpu->stat.instruction_gs++; 58 59 if (test_kvm_facility(vcpu->kvm, 133)) { 60 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)"); 61 preempt_disable(); 62 __ctl_set_bit(2, 4); 63 current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb; 64 restore_gs_cb(current->thread.gs_cb); 65 preempt_enable(); 66 vcpu->arch.sie_block->ecb |= ECB_GS; 67 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 68 vcpu->arch.gs_enabled = 1; 69 kvm_s390_retry_instr(vcpu); 70 return 0; 71 } else 72 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 73 } 74 75 int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) 76 { 77 int code = vcpu->arch.sie_block->ipb & 0xff; 78 79 if (code == 0x49 || code == 0x4d) 80 return handle_gs(vcpu); 81 else 82 return -EOPNOTSUPP; 83 } 84 /* Handle SCK (SET CLOCK) interception */ 85 static int handle_set_clock(struct kvm_vcpu *vcpu) 86 { 87 struct kvm_s390_vm_tod_clock gtod = { 0 }; 88 int rc; 89 u8 ar; 90 u64 op2; 91 92 vcpu->stat.instruction_sck++; 93 94 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 95 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 96 97 op2 = kvm_s390_get_base_disp_s(vcpu, &ar); 98 if (op2 & 7) /* Operand must be on a doubleword boundary */ 99 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 100 rc = read_guest(vcpu, op2, ar, >od.tod, sizeof(gtod.tod)); 101 if (rc) 102 return kvm_s390_inject_prog_cond(vcpu, rc); 103 104 VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); 105 kvm_s390_set_tod_clock(vcpu->kvm, >od); 106 107 kvm_s390_set_psw_cc(vcpu, 0); 108 return 0; 109 } 110 111 static int handle_set_prefix(struct kvm_vcpu *vcpu) 112 { 113 u64 operand2; 114 u32 address; 115 int rc; 116 u8 ar; 117 118 vcpu->stat.instruction_spx++; 119 120 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 121 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 122 123 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 124 125 /* must be word boundary */ 126 if (operand2 & 3) 127 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 128 129 /* get the value */ 130 rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); 131 if (rc) 132 return kvm_s390_inject_prog_cond(vcpu, rc); 133 134 address &= 0x7fffe000u; 135 136 /* 137 * Make sure the new value is valid memory. We only need to check the 138 * first page, since address is 8k aligned and memory pieces are always 139 * at least 1MB aligned and have at least a size of 1MB. 140 */ 141 if (kvm_is_error_gpa(vcpu->kvm, address)) 142 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 143 144 kvm_s390_set_prefix(vcpu, address); 145 trace_kvm_s390_handle_prefix(vcpu, 1, address); 146 return 0; 147 } 148 149 static int handle_store_prefix(struct kvm_vcpu *vcpu) 150 { 151 u64 operand2; 152 u32 address; 153 int rc; 154 u8 ar; 155 156 vcpu->stat.instruction_stpx++; 157 158 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 159 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 160 161 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 162 163 /* must be word boundary */ 164 if (operand2 & 3) 165 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 166 167 address = kvm_s390_get_prefix(vcpu); 168 169 /* get the value */ 170 rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); 171 if (rc) 172 return kvm_s390_inject_prog_cond(vcpu, rc); 173 174 VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2); 175 trace_kvm_s390_handle_prefix(vcpu, 0, address); 176 return 0; 177 } 178 179 static int handle_store_cpu_address(struct kvm_vcpu *vcpu) 180 { 181 u16 vcpu_id = vcpu->vcpu_id; 182 u64 ga; 183 int rc; 184 u8 ar; 185 186 vcpu->stat.instruction_stap++; 187 188 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 189 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 190 191 ga = kvm_s390_get_base_disp_s(vcpu, &ar); 192 193 if (ga & 1) 194 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 195 196 rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); 197 if (rc) 198 return kvm_s390_inject_prog_cond(vcpu, rc); 199 200 VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga); 201 trace_kvm_s390_handle_stap(vcpu, ga); 202 return 0; 203 } 204 205 int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu) 206 { 207 int rc = 0; 208 struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; 209 210 trace_kvm_s390_skey_related_inst(vcpu); 211 if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) && 212 !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) 213 return rc; 214 215 rc = s390_enable_skey(); 216 VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc); 217 if (!rc) { 218 if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) 219 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); 220 else 221 sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | 222 ICTL_RRBE); 223 } 224 return rc; 225 } 226 227 static int try_handle_skey(struct kvm_vcpu *vcpu) 228 { 229 int rc; 230 231 rc = kvm_s390_skey_check_enable(vcpu); 232 if (rc) 233 return rc; 234 if (sclp.has_skey) { 235 /* with storage-key facility, SIE interprets it for us */ 236 kvm_s390_retry_instr(vcpu); 237 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); 238 return -EAGAIN; 239 } 240 return 0; 241 } 242 243 static int handle_iske(struct kvm_vcpu *vcpu) 244 { 245 unsigned long addr; 246 unsigned char key; 247 int reg1, reg2; 248 int rc; 249 250 vcpu->stat.instruction_iske++; 251 252 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 253 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 254 255 rc = try_handle_skey(vcpu); 256 if (rc) 257 return rc != -EAGAIN ? rc : 0; 258 259 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 260 261 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 262 addr = kvm_s390_logical_to_effective(vcpu, addr); 263 addr = kvm_s390_real_to_abs(vcpu, addr); 264 addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr)); 265 if (kvm_is_error_hva(addr)) 266 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 267 268 down_read(¤t->mm->mmap_sem); 269 rc = get_guest_storage_key(current->mm, addr, &key); 270 up_read(¤t->mm->mmap_sem); 271 if (rc) 272 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 273 vcpu->run->s.regs.gprs[reg1] &= ~0xff; 274 vcpu->run->s.regs.gprs[reg1] |= key; 275 return 0; 276 } 277 278 static int handle_rrbe(struct kvm_vcpu *vcpu) 279 { 280 unsigned long addr; 281 int reg1, reg2; 282 int rc; 283 284 vcpu->stat.instruction_rrbe++; 285 286 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 287 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 288 289 rc = try_handle_skey(vcpu); 290 if (rc) 291 return rc != -EAGAIN ? rc : 0; 292 293 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 294 295 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 296 addr = kvm_s390_logical_to_effective(vcpu, addr); 297 addr = kvm_s390_real_to_abs(vcpu, addr); 298 addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr)); 299 if (kvm_is_error_hva(addr)) 300 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 301 302 down_read(¤t->mm->mmap_sem); 303 rc = reset_guest_reference_bit(current->mm, addr); 304 up_read(¤t->mm->mmap_sem); 305 if (rc < 0) 306 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 307 308 kvm_s390_set_psw_cc(vcpu, rc); 309 return 0; 310 } 311 312 #define SSKE_NQ 0x8 313 #define SSKE_MR 0x4 314 #define SSKE_MC 0x2 315 #define SSKE_MB 0x1 316 static int handle_sske(struct kvm_vcpu *vcpu) 317 { 318 unsigned char m3 = vcpu->arch.sie_block->ipb >> 28; 319 unsigned long start, end; 320 unsigned char key, oldkey; 321 int reg1, reg2; 322 int rc; 323 324 vcpu->stat.instruction_sske++; 325 326 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 327 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 328 329 rc = try_handle_skey(vcpu); 330 if (rc) 331 return rc != -EAGAIN ? rc : 0; 332 333 if (!test_kvm_facility(vcpu->kvm, 8)) 334 m3 &= ~SSKE_MB; 335 if (!test_kvm_facility(vcpu->kvm, 10)) 336 m3 &= ~(SSKE_MC | SSKE_MR); 337 if (!test_kvm_facility(vcpu->kvm, 14)) 338 m3 &= ~SSKE_NQ; 339 340 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 341 342 key = vcpu->run->s.regs.gprs[reg1] & 0xfe; 343 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 344 start = kvm_s390_logical_to_effective(vcpu, start); 345 if (m3 & SSKE_MB) { 346 /* start already designates an absolute address */ 347 end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1); 348 } else { 349 start = kvm_s390_real_to_abs(vcpu, start); 350 end = start + PAGE_SIZE; 351 } 352 353 while (start != end) { 354 unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); 355 356 if (kvm_is_error_hva(addr)) 357 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 358 359 down_read(¤t->mm->mmap_sem); 360 rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey, 361 m3 & SSKE_NQ, m3 & SSKE_MR, 362 m3 & SSKE_MC); 363 up_read(¤t->mm->mmap_sem); 364 if (rc < 0) 365 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 366 start += PAGE_SIZE; 367 } 368 369 if (m3 & (SSKE_MC | SSKE_MR)) { 370 if (m3 & SSKE_MB) { 371 /* skey in reg1 is unpredictable */ 372 kvm_s390_set_psw_cc(vcpu, 3); 373 } else { 374 kvm_s390_set_psw_cc(vcpu, rc); 375 vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL; 376 vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8; 377 } 378 } 379 if (m3 & SSKE_MB) { 380 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) 381 vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK; 382 else 383 vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL; 384 end = kvm_s390_logical_to_effective(vcpu, end); 385 vcpu->run->s.regs.gprs[reg2] |= end; 386 } 387 return 0; 388 } 389 390 static int handle_ipte_interlock(struct kvm_vcpu *vcpu) 391 { 392 vcpu->stat.instruction_ipte_interlock++; 393 if (psw_bits(vcpu->arch.sie_block->gpsw).pstate) 394 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 395 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); 396 kvm_s390_retry_instr(vcpu); 397 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); 398 return 0; 399 } 400 401 static int handle_test_block(struct kvm_vcpu *vcpu) 402 { 403 gpa_t addr; 404 int reg2; 405 406 vcpu->stat.instruction_tb++; 407 408 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 409 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 410 411 kvm_s390_get_regs_rre(vcpu, NULL, ®2); 412 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 413 addr = kvm_s390_logical_to_effective(vcpu, addr); 414 if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) 415 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 416 addr = kvm_s390_real_to_abs(vcpu, addr); 417 418 if (kvm_is_error_gpa(vcpu->kvm, addr)) 419 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 420 /* 421 * We don't expect errors on modern systems, and do not care 422 * about storage keys (yet), so let's just clear the page. 423 */ 424 if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) 425 return -EFAULT; 426 kvm_s390_set_psw_cc(vcpu, 0); 427 vcpu->run->s.regs.gprs[0] = 0; 428 return 0; 429 } 430 431 static int handle_tpi(struct kvm_vcpu *vcpu) 432 { 433 struct kvm_s390_interrupt_info *inti; 434 unsigned long len; 435 u32 tpi_data[3]; 436 int rc; 437 u64 addr; 438 u8 ar; 439 440 vcpu->stat.instruction_tpi++; 441 442 addr = kvm_s390_get_base_disp_s(vcpu, &ar); 443 if (addr & 3) 444 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 445 446 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); 447 if (!inti) { 448 kvm_s390_set_psw_cc(vcpu, 0); 449 return 0; 450 } 451 452 tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; 453 tpi_data[1] = inti->io.io_int_parm; 454 tpi_data[2] = inti->io.io_int_word; 455 if (addr) { 456 /* 457 * Store the two-word I/O interruption code into the 458 * provided area. 459 */ 460 len = sizeof(tpi_data) - 4; 461 rc = write_guest(vcpu, addr, ar, &tpi_data, len); 462 if (rc) { 463 rc = kvm_s390_inject_prog_cond(vcpu, rc); 464 goto reinject_interrupt; 465 } 466 } else { 467 /* 468 * Store the three-word I/O interruption code into 469 * the appropriate lowcore area. 470 */ 471 len = sizeof(tpi_data); 472 if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { 473 /* failed writes to the low core are not recoverable */ 474 rc = -EFAULT; 475 goto reinject_interrupt; 476 } 477 } 478 479 /* irq was successfully handed to the guest */ 480 kfree(inti); 481 kvm_s390_set_psw_cc(vcpu, 1); 482 return 0; 483 reinject_interrupt: 484 /* 485 * If we encounter a problem storing the interruption code, the 486 * instruction is suppressed from the guest's view: reinject the 487 * interrupt. 488 */ 489 if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { 490 kfree(inti); 491 rc = -EFAULT; 492 } 493 /* don't set the cc, a pgm irq was injected or we drop to user space */ 494 return rc ? -EFAULT : 0; 495 } 496 497 static int handle_tsch(struct kvm_vcpu *vcpu) 498 { 499 struct kvm_s390_interrupt_info *inti = NULL; 500 const u64 isc_mask = 0xffUL << 24; /* all iscs set */ 501 502 vcpu->stat.instruction_tsch++; 503 504 /* a valid schid has at least one bit set */ 505 if (vcpu->run->s.regs.gprs[1]) 506 inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, 507 vcpu->run->s.regs.gprs[1]); 508 509 /* 510 * Prepare exit to userspace. 511 * We indicate whether we dequeued a pending I/O interrupt 512 * so that userspace can re-inject it if the instruction gets 513 * a program check. While this may re-order the pending I/O 514 * interrupts, this is no problem since the priority is kept 515 * intact. 516 */ 517 vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; 518 vcpu->run->s390_tsch.dequeued = !!inti; 519 if (inti) { 520 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; 521 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; 522 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; 523 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; 524 } 525 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; 526 kfree(inti); 527 return -EREMOTE; 528 } 529 530 static int handle_io_inst(struct kvm_vcpu *vcpu) 531 { 532 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); 533 534 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 535 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 536 537 if (vcpu->kvm->arch.css_support) { 538 /* 539 * Most I/O instructions will be handled by userspace. 540 * Exceptions are tpi and the interrupt portion of tsch. 541 */ 542 if (vcpu->arch.sie_block->ipa == 0xb236) 543 return handle_tpi(vcpu); 544 if (vcpu->arch.sie_block->ipa == 0xb235) 545 return handle_tsch(vcpu); 546 /* Handle in userspace. */ 547 vcpu->stat.instruction_io_other++; 548 return -EOPNOTSUPP; 549 } else { 550 /* 551 * Set condition code 3 to stop the guest from issuing channel 552 * I/O instructions. 553 */ 554 kvm_s390_set_psw_cc(vcpu, 3); 555 return 0; 556 } 557 } 558 559 static int handle_stfl(struct kvm_vcpu *vcpu) 560 { 561 int rc; 562 unsigned int fac; 563 564 vcpu->stat.instruction_stfl++; 565 566 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 567 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 568 569 /* 570 * We need to shift the lower 32 facility bits (bit 0-31) from a u64 571 * into a u32 memory representation. They will remain bits 0-31. 572 */ 573 fac = *vcpu->kvm->arch.model.fac_list >> 32; 574 rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list), 575 &fac, sizeof(fac)); 576 if (rc) 577 return rc; 578 VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac); 579 trace_kvm_s390_handle_stfl(vcpu, fac); 580 return 0; 581 } 582 583 #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA) 584 #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL 585 #define PSW_ADDR_24 0x0000000000ffffffUL 586 #define PSW_ADDR_31 0x000000007fffffffUL 587 588 int is_valid_psw(psw_t *psw) 589 { 590 if (psw->mask & PSW_MASK_UNASSIGNED) 591 return 0; 592 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { 593 if (psw->addr & ~PSW_ADDR_31) 594 return 0; 595 } 596 if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24)) 597 return 0; 598 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) 599 return 0; 600 if (psw->addr & 1) 601 return 0; 602 return 1; 603 } 604 605 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) 606 { 607 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; 608 psw_compat_t new_psw; 609 u64 addr; 610 int rc; 611 u8 ar; 612 613 vcpu->stat.instruction_lpsw++; 614 615 if (gpsw->mask & PSW_MASK_PSTATE) 616 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 617 618 addr = kvm_s390_get_base_disp_s(vcpu, &ar); 619 if (addr & 7) 620 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 621 622 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); 623 if (rc) 624 return kvm_s390_inject_prog_cond(vcpu, rc); 625 if (!(new_psw.mask & PSW32_MASK_BASE)) 626 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 627 gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; 628 gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE; 629 gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE; 630 if (!is_valid_psw(gpsw)) 631 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 632 return 0; 633 } 634 635 static int handle_lpswe(struct kvm_vcpu *vcpu) 636 { 637 psw_t new_psw; 638 u64 addr; 639 int rc; 640 u8 ar; 641 642 vcpu->stat.instruction_lpswe++; 643 644 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 645 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 646 647 addr = kvm_s390_get_base_disp_s(vcpu, &ar); 648 if (addr & 7) 649 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 650 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); 651 if (rc) 652 return kvm_s390_inject_prog_cond(vcpu, rc); 653 vcpu->arch.sie_block->gpsw = new_psw; 654 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) 655 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 656 return 0; 657 } 658 659 static int handle_stidp(struct kvm_vcpu *vcpu) 660 { 661 u64 stidp_data = vcpu->kvm->arch.model.cpuid; 662 u64 operand2; 663 int rc; 664 u8 ar; 665 666 vcpu->stat.instruction_stidp++; 667 668 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 669 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 670 671 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 672 673 if (operand2 & 7) 674 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 675 676 rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); 677 if (rc) 678 return kvm_s390_inject_prog_cond(vcpu, rc); 679 680 VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data); 681 return 0; 682 } 683 684 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) 685 { 686 int cpus = 0; 687 int n; 688 689 cpus = atomic_read(&vcpu->kvm->online_vcpus); 690 691 /* deal with other level 3 hypervisors */ 692 if (stsi(mem, 3, 2, 2)) 693 mem->count = 0; 694 if (mem->count < 8) 695 mem->count++; 696 for (n = mem->count - 1; n > 0 ; n--) 697 memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); 698 699 memset(&mem->vm[0], 0, sizeof(mem->vm[0])); 700 mem->vm[0].cpus_total = cpus; 701 mem->vm[0].cpus_configured = cpus; 702 mem->vm[0].cpus_standby = 0; 703 mem->vm[0].cpus_reserved = 0; 704 mem->vm[0].caf = 1000; 705 memcpy(mem->vm[0].name, "KVMguest", 8); 706 ASCEBC(mem->vm[0].name, 8); 707 memcpy(mem->vm[0].cpi, "KVM/Linux ", 16); 708 ASCEBC(mem->vm[0].cpi, 16); 709 } 710 711 static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar, 712 u8 fc, u8 sel1, u16 sel2) 713 { 714 vcpu->run->exit_reason = KVM_EXIT_S390_STSI; 715 vcpu->run->s390_stsi.addr = addr; 716 vcpu->run->s390_stsi.ar = ar; 717 vcpu->run->s390_stsi.fc = fc; 718 vcpu->run->s390_stsi.sel1 = sel1; 719 vcpu->run->s390_stsi.sel2 = sel2; 720 } 721 722 static int handle_stsi(struct kvm_vcpu *vcpu) 723 { 724 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; 725 int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; 726 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; 727 unsigned long mem = 0; 728 u64 operand2; 729 int rc = 0; 730 u8 ar; 731 732 vcpu->stat.instruction_stsi++; 733 VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2); 734 735 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 736 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 737 738 if (fc > 3) { 739 kvm_s390_set_psw_cc(vcpu, 3); 740 return 0; 741 } 742 743 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 744 || vcpu->run->s.regs.gprs[1] & 0xffff0000) 745 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 746 747 if (fc == 0) { 748 vcpu->run->s.regs.gprs[0] = 3 << 28; 749 kvm_s390_set_psw_cc(vcpu, 0); 750 return 0; 751 } 752 753 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 754 755 if (operand2 & 0xfff) 756 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 757 758 switch (fc) { 759 case 1: /* same handling for 1 and 2 */ 760 case 2: 761 mem = get_zeroed_page(GFP_KERNEL); 762 if (!mem) 763 goto out_no_data; 764 if (stsi((void *) mem, fc, sel1, sel2)) 765 goto out_no_data; 766 break; 767 case 3: 768 if (sel1 != 2 || sel2 != 2) 769 goto out_no_data; 770 mem = get_zeroed_page(GFP_KERNEL); 771 if (!mem) 772 goto out_no_data; 773 handle_stsi_3_2_2(vcpu, (void *) mem); 774 break; 775 } 776 777 rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); 778 if (rc) { 779 rc = kvm_s390_inject_prog_cond(vcpu, rc); 780 goto out; 781 } 782 if (vcpu->kvm->arch.user_stsi) { 783 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); 784 rc = -EREMOTE; 785 } 786 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); 787 free_page(mem); 788 kvm_s390_set_psw_cc(vcpu, 0); 789 vcpu->run->s.regs.gprs[0] = 0; 790 return rc; 791 out_no_data: 792 kvm_s390_set_psw_cc(vcpu, 3); 793 out: 794 free_page(mem); 795 return rc; 796 } 797 798 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) 799 { 800 switch (vcpu->arch.sie_block->ipa & 0x00ff) { 801 case 0x02: 802 return handle_stidp(vcpu); 803 case 0x04: 804 return handle_set_clock(vcpu); 805 case 0x10: 806 return handle_set_prefix(vcpu); 807 case 0x11: 808 return handle_store_prefix(vcpu); 809 case 0x12: 810 return handle_store_cpu_address(vcpu); 811 case 0x14: 812 return kvm_s390_handle_vsie(vcpu); 813 case 0x21: 814 case 0x50: 815 return handle_ipte_interlock(vcpu); 816 case 0x29: 817 return handle_iske(vcpu); 818 case 0x2a: 819 return handle_rrbe(vcpu); 820 case 0x2b: 821 return handle_sske(vcpu); 822 case 0x2c: 823 return handle_test_block(vcpu); 824 case 0x30: 825 case 0x31: 826 case 0x32: 827 case 0x33: 828 case 0x34: 829 case 0x35: 830 case 0x36: 831 case 0x37: 832 case 0x38: 833 case 0x39: 834 case 0x3a: 835 case 0x3b: 836 case 0x3c: 837 case 0x5f: 838 case 0x74: 839 case 0x76: 840 return handle_io_inst(vcpu); 841 case 0x56: 842 return handle_sthyi(vcpu); 843 case 0x7d: 844 return handle_stsi(vcpu); 845 case 0xb1: 846 return handle_stfl(vcpu); 847 case 0xb2: 848 return handle_lpswe(vcpu); 849 default: 850 return -EOPNOTSUPP; 851 } 852 } 853 854 static int handle_epsw(struct kvm_vcpu *vcpu) 855 { 856 int reg1, reg2; 857 858 vcpu->stat.instruction_epsw++; 859 860 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 861 862 /* This basically extracts the mask half of the psw. */ 863 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; 864 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; 865 if (reg2) { 866 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; 867 vcpu->run->s.regs.gprs[reg2] |= 868 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; 869 } 870 return 0; 871 } 872 873 #define PFMF_RESERVED 0xfffc0101UL 874 #define PFMF_SK 0x00020000UL 875 #define PFMF_CF 0x00010000UL 876 #define PFMF_UI 0x00008000UL 877 #define PFMF_FSC 0x00007000UL 878 #define PFMF_NQ 0x00000800UL 879 #define PFMF_MR 0x00000400UL 880 #define PFMF_MC 0x00000200UL 881 #define PFMF_KEY 0x000000feUL 882 883 static int handle_pfmf(struct kvm_vcpu *vcpu) 884 { 885 bool mr = false, mc = false, nq; 886 int reg1, reg2; 887 unsigned long start, end; 888 unsigned char key; 889 890 vcpu->stat.instruction_pfmf++; 891 892 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 893 894 if (!test_kvm_facility(vcpu->kvm, 8)) 895 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 896 897 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 898 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 899 900 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) 901 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 902 903 /* Only provide non-quiescing support if enabled for the guest */ 904 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && 905 !test_kvm_facility(vcpu->kvm, 14)) 906 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 907 908 /* Only provide conditional-SSKE support if enabled for the guest */ 909 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK && 910 test_kvm_facility(vcpu->kvm, 10)) { 911 mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR; 912 mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC; 913 } 914 915 nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ; 916 key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY; 917 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 918 start = kvm_s390_logical_to_effective(vcpu, start); 919 920 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 921 if (kvm_s390_check_low_addr_prot_real(vcpu, start)) 922 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 923 } 924 925 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 926 case 0x00000000: 927 /* only 4k frames specify a real address */ 928 start = kvm_s390_real_to_abs(vcpu, start); 929 end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); 930 break; 931 case 0x00001000: 932 end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1); 933 break; 934 case 0x00002000: 935 /* only support 2G frame size if EDAT2 is available and we are 936 not in 24-bit addressing mode */ 937 if (!test_kvm_facility(vcpu->kvm, 78) || 938 psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT) 939 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 940 end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1); 941 break; 942 default: 943 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 944 } 945 946 while (start != end) { 947 unsigned long useraddr; 948 949 /* Translate guest address to host address */ 950 useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); 951 if (kvm_is_error_hva(useraddr)) 952 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 953 954 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 955 if (clear_user((void __user *)useraddr, PAGE_SIZE)) 956 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 957 } 958 959 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { 960 int rc = kvm_s390_skey_check_enable(vcpu); 961 962 if (rc) 963 return rc; 964 down_read(¤t->mm->mmap_sem); 965 rc = cond_set_guest_storage_key(current->mm, useraddr, 966 key, NULL, nq, mr, mc); 967 up_read(¤t->mm->mmap_sem); 968 if (rc < 0) 969 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 970 } 971 972 start += PAGE_SIZE; 973 } 974 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 975 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { 976 vcpu->run->s.regs.gprs[reg2] = end; 977 } else { 978 vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL; 979 end = kvm_s390_logical_to_effective(vcpu, end); 980 vcpu->run->s.regs.gprs[reg2] |= end; 981 } 982 } 983 return 0; 984 } 985 986 static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) 987 { 988 struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state; 989 int r1, r2, nappended, entries; 990 unsigned long gfn, hva, res, pgstev, ptev; 991 unsigned long *cbrlo; 992 993 /* 994 * We don't need to set SD.FPF.SK to 1 here, because if we have a 995 * machine check here we either handle it or crash 996 */ 997 998 kvm_s390_get_regs_rre(vcpu, &r1, &r2); 999 gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT; 1000 hva = gfn_to_hva(vcpu->kvm, gfn); 1001 entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; 1002 1003 if (kvm_is_error_hva(hva)) 1004 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 1005 1006 nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev); 1007 if (nappended < 0) { 1008 res = orc ? 0x10 : 0; 1009 vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */ 1010 return 0; 1011 } 1012 res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22; 1013 /* 1014 * Set the block-content state part of the result. 0 means resident, so 1015 * nothing to do if the page is valid. 2 is for preserved pages 1016 * (non-present and non-zero), and 3 for zero pages (non-present and 1017 * zero). 1018 */ 1019 if (ptev & _PAGE_INVALID) { 1020 res |= 2; 1021 if (pgstev & _PGSTE_GPS_ZERO) 1022 res |= 1; 1023 } 1024 if (pgstev & _PGSTE_GPS_NODAT) 1025 res |= 0x20; 1026 vcpu->run->s.regs.gprs[r1] = res; 1027 /* 1028 * It is possible that all the normal 511 slots were full, in which case 1029 * we will now write in the 512th slot, which is reserved for host use. 1030 * In both cases we let the normal essa handling code process all the 1031 * slots, including the reserved one, if needed. 1032 */ 1033 if (nappended > 0) { 1034 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK); 1035 cbrlo[entries] = gfn << PAGE_SHIFT; 1036 } 1037 1038 if (orc && gfn < ms->bitmap_size) { 1039 /* increment only if we are really flipping the bit to 1 */ 1040 if (!test_and_set_bit(gfn, ms->pgste_bitmap)) 1041 atomic64_inc(&ms->dirty_pages); 1042 } 1043 1044 return nappended; 1045 } 1046 1047 static int handle_essa(struct kvm_vcpu *vcpu) 1048 { 1049 /* entries expected to be 1FF */ 1050 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; 1051 unsigned long *cbrlo; 1052 struct gmap *gmap; 1053 int i, orc; 1054 1055 VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries); 1056 gmap = vcpu->arch.gmap; 1057 vcpu->stat.instruction_essa++; 1058 if (!vcpu->kvm->arch.use_cmma) 1059 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 1060 1061 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 1062 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 1063 /* Check for invalid operation request code */ 1064 orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; 1065 /* ORCs 0-6 are always valid */ 1066 if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT 1067 : ESSA_SET_STABLE_IF_RESIDENT)) 1068 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 1069 1070 if (likely(!vcpu->kvm->arch.migration_state)) { 1071 /* 1072 * CMMA is enabled in the KVM settings, but is disabled in 1073 * the SIE block and in the mm_context, and we are not doing 1074 * a migration. Enable CMMA in the mm_context. 1075 * Since we need to take a write lock to write to the context 1076 * to avoid races with storage keys handling, we check if the 1077 * value really needs to be written to; if the value is 1078 * already correct, we do nothing and avoid the lock. 1079 */ 1080 if (vcpu->kvm->mm->context.uses_cmm == 0) { 1081 down_write(&vcpu->kvm->mm->mmap_sem); 1082 vcpu->kvm->mm->context.uses_cmm = 1; 1083 up_write(&vcpu->kvm->mm->mmap_sem); 1084 } 1085 /* 1086 * If we are here, we are supposed to have CMMA enabled in 1087 * the SIE block. Enabling CMMA works on a per-CPU basis, 1088 * while the context use_cmma flag is per process. 1089 * It's possible that the context flag is enabled and the 1090 * SIE flag is not, so we set the flag always; if it was 1091 * already set, nothing changes, otherwise we enable it 1092 * on this CPU too. 1093 */ 1094 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; 1095 /* Retry the ESSA instruction */ 1096 kvm_s390_retry_instr(vcpu); 1097 } else { 1098 /* Account for the possible extra cbrl entry */ 1099 i = do_essa(vcpu, orc); 1100 if (i < 0) 1101 return i; 1102 entries += i; 1103 } 1104 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ 1105 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); 1106 down_read(&gmap->mm->mmap_sem); 1107 for (i = 0; i < entries; ++i) 1108 __gmap_zap(gmap, cbrlo[i]); 1109 up_read(&gmap->mm->mmap_sem); 1110 return 0; 1111 } 1112 1113 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) 1114 { 1115 switch (vcpu->arch.sie_block->ipa & 0x00ff) { 1116 case 0x8a: 1117 case 0x8e: 1118 case 0x8f: 1119 return handle_ipte_interlock(vcpu); 1120 case 0x8d: 1121 return handle_epsw(vcpu); 1122 case 0xab: 1123 return handle_essa(vcpu); 1124 case 0xaf: 1125 return handle_pfmf(vcpu); 1126 default: 1127 return -EOPNOTSUPP; 1128 } 1129 } 1130 1131 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) 1132 { 1133 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 1134 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 1135 int reg, rc, nr_regs; 1136 u32 ctl_array[16]; 1137 u64 ga; 1138 u8 ar; 1139 1140 vcpu->stat.instruction_lctl++; 1141 1142 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 1143 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 1144 1145 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); 1146 1147 if (ga & 3) 1148 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 1149 1150 VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); 1151 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); 1152 1153 nr_regs = ((reg3 - reg1) & 0xf) + 1; 1154 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); 1155 if (rc) 1156 return kvm_s390_inject_prog_cond(vcpu, rc); 1157 reg = reg1; 1158 nr_regs = 0; 1159 do { 1160 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; 1161 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; 1162 if (reg == reg3) 1163 break; 1164 reg = (reg + 1) % 16; 1165 } while (1); 1166 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1167 return 0; 1168 } 1169 1170 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) 1171 { 1172 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 1173 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 1174 int reg, rc, nr_regs; 1175 u32 ctl_array[16]; 1176 u64 ga; 1177 u8 ar; 1178 1179 vcpu->stat.instruction_stctl++; 1180 1181 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 1182 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 1183 1184 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); 1185 1186 if (ga & 3) 1187 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 1188 1189 VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); 1190 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); 1191 1192 reg = reg1; 1193 nr_regs = 0; 1194 do { 1195 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; 1196 if (reg == reg3) 1197 break; 1198 reg = (reg + 1) % 16; 1199 } while (1); 1200 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); 1201 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; 1202 } 1203 1204 static int handle_lctlg(struct kvm_vcpu *vcpu) 1205 { 1206 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 1207 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 1208 int reg, rc, nr_regs; 1209 u64 ctl_array[16]; 1210 u64 ga; 1211 u8 ar; 1212 1213 vcpu->stat.instruction_lctlg++; 1214 1215 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 1216 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 1217 1218 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); 1219 1220 if (ga & 7) 1221 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 1222 1223 VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); 1224 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); 1225 1226 nr_regs = ((reg3 - reg1) & 0xf) + 1; 1227 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); 1228 if (rc) 1229 return kvm_s390_inject_prog_cond(vcpu, rc); 1230 reg = reg1; 1231 nr_regs = 0; 1232 do { 1233 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; 1234 if (reg == reg3) 1235 break; 1236 reg = (reg + 1) % 16; 1237 } while (1); 1238 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1239 return 0; 1240 } 1241 1242 static int handle_stctg(struct kvm_vcpu *vcpu) 1243 { 1244 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 1245 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 1246 int reg, rc, nr_regs; 1247 u64 ctl_array[16]; 1248 u64 ga; 1249 u8 ar; 1250 1251 vcpu->stat.instruction_stctg++; 1252 1253 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 1254 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 1255 1256 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); 1257 1258 if (ga & 7) 1259 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 1260 1261 VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); 1262 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); 1263 1264 reg = reg1; 1265 nr_regs = 0; 1266 do { 1267 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; 1268 if (reg == reg3) 1269 break; 1270 reg = (reg + 1) % 16; 1271 } while (1); 1272 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); 1273 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; 1274 } 1275 1276 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) 1277 { 1278 switch (vcpu->arch.sie_block->ipb & 0x000000ff) { 1279 case 0x25: 1280 return handle_stctg(vcpu); 1281 case 0x2f: 1282 return handle_lctlg(vcpu); 1283 case 0x60: 1284 case 0x61: 1285 case 0x62: 1286 return handle_ri(vcpu); 1287 default: 1288 return -EOPNOTSUPP; 1289 } 1290 } 1291 1292 static int handle_tprot(struct kvm_vcpu *vcpu) 1293 { 1294 u64 address1, address2; 1295 unsigned long hva, gpa; 1296 int ret = 0, cc = 0; 1297 bool writable; 1298 u8 ar; 1299 1300 vcpu->stat.instruction_tprot++; 1301 1302 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 1303 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 1304 1305 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); 1306 1307 /* we only handle the Linux memory detection case: 1308 * access key == 0 1309 * everything else goes to userspace. */ 1310 if (address2 & 0xf0) 1311 return -EOPNOTSUPP; 1312 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 1313 ipte_lock(vcpu); 1314 ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE); 1315 if (ret == PGM_PROTECTION) { 1316 /* Write protected? Try again with read-only... */ 1317 cc = 1; 1318 ret = guest_translate_address(vcpu, address1, ar, &gpa, 1319 GACC_FETCH); 1320 } 1321 if (ret) { 1322 if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { 1323 ret = kvm_s390_inject_program_int(vcpu, ret); 1324 } else if (ret > 0) { 1325 /* Translation not available */ 1326 kvm_s390_set_psw_cc(vcpu, 3); 1327 ret = 0; 1328 } 1329 goto out_unlock; 1330 } 1331 1332 hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); 1333 if (kvm_is_error_hva(hva)) { 1334 ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 1335 } else { 1336 if (!writable) 1337 cc = 1; /* Write not permitted ==> read-only */ 1338 kvm_s390_set_psw_cc(vcpu, cc); 1339 /* Note: CC2 only occurs for storage keys (not supported yet) */ 1340 } 1341 out_unlock: 1342 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 1343 ipte_unlock(vcpu); 1344 return ret; 1345 } 1346 1347 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) 1348 { 1349 switch (vcpu->arch.sie_block->ipa & 0x00ff) { 1350 case 0x01: 1351 return handle_tprot(vcpu); 1352 default: 1353 return -EOPNOTSUPP; 1354 } 1355 } 1356 1357 static int handle_sckpf(struct kvm_vcpu *vcpu) 1358 { 1359 u32 value; 1360 1361 vcpu->stat.instruction_sckpf++; 1362 1363 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 1364 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 1365 1366 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) 1367 return kvm_s390_inject_program_int(vcpu, 1368 PGM_SPECIFICATION); 1369 1370 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; 1371 vcpu->arch.sie_block->todpr = value; 1372 1373 return 0; 1374 } 1375 1376 static int handle_ptff(struct kvm_vcpu *vcpu) 1377 { 1378 vcpu->stat.instruction_ptff++; 1379 1380 /* we don't emulate any control instructions yet */ 1381 kvm_s390_set_psw_cc(vcpu, 3); 1382 return 0; 1383 } 1384 1385 int kvm_s390_handle_01(struct kvm_vcpu *vcpu) 1386 { 1387 switch (vcpu->arch.sie_block->ipa & 0x00ff) { 1388 case 0x04: 1389 return handle_ptff(vcpu); 1390 case 0x07: 1391 return handle_sckpf(vcpu); 1392 default: 1393 return -EOPNOTSUPP; 1394 } 1395 } 1396