/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

static int handle_gs(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
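/*
 * SPX and STPX below operate on the vcpu's prefix register. On s390,
 * each CPU has an 8k prefix area: guest real addresses 0..8191 are
 * mapped to absolute addresses prefix..prefix+8191 and vice versa, so
 * every CPU can keep a private lowcore. An illustrative sketch of the
 * mapping, assuming a prefix value of 0x10000:
 *
 *	real 0x00000..0x01fff <-> absolute 0x10000..0x11fff
 *	real 0x10000..0x11fff <-> absolute 0x00000..0x01fff
 *	all other real addresses are used unchanged as absolute addresses
 */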
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc) {
		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
		else
			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
					     ICTL_RRBE);
	}
	return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}
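/*
 * ISKE, RRBE and SSKE below manipulate the 7-bit storage key of a 4k
 * frame. For orientation, the key byte looks roughly like this (an
 * informal sketch; the Principles of Operation has the authoritative
 * layout):
 *
 *	bits 0-3: ACC - access-control bits, matched against the PSW key
 *	bit    4: F   - fetch-protection bit
 *	bit    5: R   - reference bit
 *	bit    6: C   - change bit
 *
 * ISKE returns this byte in the low byte of r1, which is why
 * handle_iske() only replaces the lowest eight bits of the register.
 */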
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
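/*
 * SSKE with the MB (multiple block) bit processes keys from the
 * designated address up to the next 1M boundary. A small worked
 * example of the "end" computation used below, assuming a start
 * address of 0x12345000:
 *
 *	end = (0x12345000 + (1UL << 20)) & ~((1UL << 20) - 1)
 *	    = 0x12445000 & 0xfff00000
 *	    = 0x12400000
 *
 * i.e. the pages 0x12345000..0x123ff000 are processed, and r2 is
 * advanced to the next 1M frame so the guest can resume an
 * interrupted multiple-block SSKE.
 */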
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}
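/*
 * TEST BLOCK is used by guests (for instance by Linux memory
 * detection) to check whether a 4k block of storage is usable. The
 * handler below only implements the successful case: it clears the
 * block and reports cc 0 / r0 == 0 ("block usable"); invalid guest
 * addresses end in an addressing exception instead. This is a
 * plain-language summary of the code below, not a restatement of the
 * architecture.
 */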
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}
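/*
 * Layout of the I/O interruption code as assembled in tpi_data[] in
 * handle_tpi() above (an informal sketch; the architected layout lives
 * in the Principles of Operation):
 *
 *	word 0: subchannel id (bits 0-15) | subchannel number (bits 16-31)
 *	word 1: I/O interruption parameter
 *	word 2: I/O interruption word
 *
 * TPI with a non-zero second operand stores only the first two words
 * there; TPI with a zero operand stores all three words into the
 * lowcore starting at __LC_SUBCHANNEL_ID.
 */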
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24;	/* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}
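/*
 * PSW sanity checking. The extended (EA) and basic (BA) addressing
 * mode bits select the addressing mode roughly as follows:
 *
 *	EA BA	mode
 *	 0  0	24-bit	(address must fit in 24 bits)
 *	 0  1	31-bit	(address must fit in 31 bits)
 *	 1  0	invalid
 *	 1  1	64-bit
 *
 * is_valid_psw() below rejects the invalid combination, out-of-range
 * addresses for the short modes, mask bits that are unassigned by the
 * architecture, and odd instruction addresses.
 */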
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}
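/*
 * STSI operand encoding, as decoded at the top of handle_stsi() below.
 * A worked example: a guest asking for the level-3 VM description list
 * uses fc = 3, sel1 = 2, sel2 = 2, i.e.
 *
 *	r0 = 0x30000002	(function code in bits 0-3 of the low word,
 *			 selector 1 in the low byte)
 *	r1 = 0x00000002	(selector 2 in the low halfword)
 *
 * All other low-word bits of r0/r1 are checked below and must be zero,
 * otherwise the instruction ends with a specification exception.
 */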
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x56] = handle_sthyi,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}
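/*
 * PFMF (PERFORM FRAME MANAGEMENT FUNCTION) takes its controls from r1;
 * the #defines below name the fields of the low word:
 *
 *	SK	set the storage key from the KEY field
 *	CF	clear the frame contents
 *	UI	usage indication
 *	FSC	frame-size code: 0 = 4K, 1 = 1M, 2 = 2G frame
 *	NQ/MR/MC	key-setting controls, as for SSKE
 *	KEY	the storage key to set
 *
 * r2 designates the first frame; for the larger frame sizes r2 is
 * advanced as the frames are processed, mirroring the multiple-block
 * logic of handle_sske() above.
 */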
#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/*
		 * only support 2G frame size if EDAT2 is available and
		 * we are not in 24-bit addressing mode
		 */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}
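/*
 * A note on the CBRL buffer used by handle_essa() above:
 * sie_block->cbrlo holds the origin of a page-aligned buffer, with the
 * current fill level encoded in the low bits. Each entry is an 8-byte
 * guest address, hence
 *
 *	entries = (cbrlo & ~PAGE_MASK) >> 3
 *
 * e.g. a byte offset of 0xff8 means 0x1ff entries, matching the
 * "entries expected to be 1FF" comment. Clearing the low bits resets
 * the buffer before the instruction is retried.
 */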
static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
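/*
 * Like their hardware counterparts, the LCTL/STCTL handlers wrap
 * around from control register 15 to control register 0. A quick
 * worked example for the register-count computation and copy loop
 * used above and below, assuming reg1 = 14 and reg3 = 1:
 *
 *	nr_regs = ((1 - 14) & 0xf) + 1 = 3 + 1 = 4
 *
 * and the loop touches cr14, cr15, cr0 and cr1, in that order.
 */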
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
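/*
 * TPROT (TEST PROTECTION) condition codes, for reference (an informal
 * summary; see the Principles of Operation for the exact wording):
 *
 *	cc 0: fetch and store permitted
 *	cc 1: fetch permitted, store not permitted
 *	cc 2: neither fetch nor store permitted (key protection)
 *	cc 3: translation not available
 *
 * The handler below derives cc 1 either from a protection exception
 * during translation or from a read-only host mapping, and never
 * reports cc 2, since storage keys are not taken into account yet.
 */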
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/*
	 * We only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace.
	 */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;	/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}