/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

static int handle_gs(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

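/*
 * SET PREFIX (SPX): read the new prefix from the second operand, mask it
 * down to an 8K-aligned address below 2 GB and make sure it designates
 * existing guest memory before committing it with kvm_s390_set_prefix().
 */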
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc) {
		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
		else
			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
					     ICTL_RRBE);
	}
	return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}

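/*
 * ISKE, RRBE and SSKE below all follow the same pattern when the host does
 * not interpret them: translate the guest logical address to an absolute
 * address, then to a host virtual address, and read or update the storage
 * key in the host page tables while holding mmap_sem.
 */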
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

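/*
 * SSKE m3 option bits (multiple-block, reference/change-bit update masks,
 * nonquiescing). Bits whose facility is not available to the guest are
 * masked off in handle_sske() below.
 */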
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

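/*
 * TEST BLOCK: make sure the 4K block designated by reg2 is backed by usable
 * guest memory, clear it, and report success with cc 0 and gpr 0 == 0.
 */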
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

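/*
 * STIDP: store the CPU ID that was assembled for the machine model
 * (vcpu->kvm->arch.model.cpuid) at the doubleword-aligned second operand.
 */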
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

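/*
 * Dispatch table for the 0xb2xx opcodes: the low byte of the IPA selects
 * the in-kernel handler, e.g. IPA 0xb202 (STIDP) goes to handle_stidp.
 * Opcodes without an entry are passed to userspace via -EOPNOTSUPP.
 */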
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x56] = handle_sthyi,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

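/*
 * PFMF (PERFORM FRAME MANAGEMENT FUNCTION) register-1 fields used below:
 * SK sets the storage key, CF clears the frame, FSC selects the frame size
 * (4K, 1M or 2G), and NQ/MR/MC/KEY qualify the key-setting operation.
 */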
#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

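/*
 * Process one ESSA request: translate the guest frame to a host address,
 * apply the requested operation to the PGSTE, return the usage and
 * block-content state in r1 and append the frame to the CBRL if needed.
 * Returns the number of appended CBRL entries or a negative error.
 */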
static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		/* increment only if we are really flipping the bit to 1 */
		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
			atomic64_inc(&ms->dirty_pages);
	}

	return nappended;
}

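/*
 * ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) drives CMMA. Outside of
 * migration the instruction is handed back to SIE once CMMA interpretation
 * has been enabled; while a migration is in progress the intercepted
 * request is handled by do_essa() so the dirty bitmap stays consistent.
 */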
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (likely(!vcpu->kvm->arch.migration_state)) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.use_cmma == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.use_cmma = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		/* Account for the possible extra cbrl entry */
		i = do_essa(vcpu, orc);
		if (i < 0)
			return i;
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

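/*
 * STCTL only deals with the 32-bit halves of the control registers: bits
 * 32-63 of each register in the range reg1..reg3 are stored at the
 * word-aligned second operand (the 64-bit variant is STCTG below).
 */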
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

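/*
 * TPROT (TEST PROTECTION): translate the first operand for a store and, if
 * that fails with a protection exception, retry for a fetch. cc 0 means
 * fetch and store are allowed, cc 1 fetch only, cc 3 translation not
 * available; cc 2 (key-controlled protection) is not emulated here.
 */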
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}