1 /* 2 * handling privileged instructions 3 * 4 * Copyright IBM Corp. 2008, 2013 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License (version 2 only) 8 * as published by the Free Software Foundation. 9 * 10 * Author(s): Carsten Otte <cotte@de.ibm.com> 11 * Christian Borntraeger <borntraeger@de.ibm.com> 12 */ 13 14 #include <linux/kvm.h> 15 #include <linux/gfp.h> 16 #include <linux/errno.h> 17 #include <linux/compat.h> 18 #include <asm/asm-offsets.h> 19 #include <asm/facility.h> 20 #include <asm/current.h> 21 #include <asm/debug.h> 22 #include <asm/ebcdic.h> 23 #include <asm/sysinfo.h> 24 #include <asm/pgtable.h> 25 #include <asm/pgalloc.h> 26 #include <asm/io.h> 27 #include <asm/ptrace.h> 28 #include <asm/compat.h> 29 #include "gaccess.h" 30 #include "kvm-s390.h" 31 #include "trace.h" 32 33 /* Handle SCK (SET CLOCK) interception */ 34 static int handle_set_clock(struct kvm_vcpu *vcpu) 35 { 36 struct kvm_vcpu *cpup; 37 s64 hostclk, val; 38 int i, rc; 39 u64 op2; 40 41 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 42 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 43 44 op2 = kvm_s390_get_base_disp_s(vcpu); 45 if (op2 & 7) /* Operand must be on a doubleword boundary */ 46 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 47 rc = read_guest(vcpu, op2, &val, sizeof(val)); 48 if (rc) 49 return kvm_s390_inject_prog_cond(vcpu, rc); 50 51 if (store_tod_clock(&hostclk)) { 52 kvm_s390_set_psw_cc(vcpu, 3); 53 return 0; 54 } 55 val = (val - hostclk) & ~0x3fUL; 56 57 mutex_lock(&vcpu->kvm->lock); 58 kvm_for_each_vcpu(i, cpup, vcpu->kvm) 59 cpup->arch.sie_block->epoch = val; 60 mutex_unlock(&vcpu->kvm->lock); 61 62 kvm_s390_set_psw_cc(vcpu, 0); 63 return 0; 64 } 65 66 static int handle_set_prefix(struct kvm_vcpu *vcpu) 67 { 68 u64 operand2; 69 u32 address; 70 int rc; 71 72 vcpu->stat.instruction_spx++; 73 74 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 75 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 76 77 operand2 = kvm_s390_get_base_disp_s(vcpu); 78 79 /* must be word boundary */ 80 if (operand2 & 3) 81 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 82 83 /* get the value */ 84 rc = read_guest(vcpu, operand2, &address, sizeof(address)); 85 if (rc) 86 return kvm_s390_inject_prog_cond(vcpu, rc); 87 88 address &= 0x7fffe000u; 89 90 /* 91 * Make sure the new value is valid memory. We only need to check the 92 * first page, since address is 8k aligned and memory pieces are always 93 * at least 1MB aligned and have at least a size of 1MB. 94 */ 95 if (kvm_is_error_gpa(vcpu->kvm, address)) 96 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 97 98 kvm_s390_set_prefix(vcpu, address); 99 100 VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); 101 trace_kvm_s390_handle_prefix(vcpu, 1, address); 102 return 0; 103 } 104 105 static int handle_store_prefix(struct kvm_vcpu *vcpu) 106 { 107 u64 operand2; 108 u32 address; 109 int rc; 110 111 vcpu->stat.instruction_stpx++; 112 113 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 114 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 115 116 operand2 = kvm_s390_get_base_disp_s(vcpu); 117 118 /* must be word boundary */ 119 if (operand2 & 3) 120 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 121 122 address = kvm_s390_get_prefix(vcpu); 123 124 /* get the value */ 125 rc = write_guest(vcpu, operand2, &address, sizeof(address)); 126 if (rc) 127 return kvm_s390_inject_prog_cond(vcpu, rc); 128 129 VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); 130 trace_kvm_s390_handle_prefix(vcpu, 0, address); 131 return 0; 132 } 133 134 static int handle_store_cpu_address(struct kvm_vcpu *vcpu) 135 { 136 u16 vcpu_id = vcpu->vcpu_id; 137 u64 ga; 138 int rc; 139 140 vcpu->stat.instruction_stap++; 141 142 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 143 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 144 145 ga = kvm_s390_get_base_disp_s(vcpu); 146 147 if (ga & 1) 148 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 149 150 rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); 151 if (rc) 152 return kvm_s390_inject_prog_cond(vcpu, rc); 153 154 VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); 155 trace_kvm_s390_handle_stap(vcpu, ga); 156 return 0; 157 } 158 159 static int __skey_check_enable(struct kvm_vcpu *vcpu) 160 { 161 int rc = 0; 162 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) 163 return rc; 164 165 rc = s390_enable_skey(); 166 trace_kvm_s390_skey_related_inst(vcpu); 167 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); 168 return rc; 169 } 170 171 172 static int handle_skey(struct kvm_vcpu *vcpu) 173 { 174 int rc = __skey_check_enable(vcpu); 175 176 if (rc) 177 return rc; 178 vcpu->stat.instruction_storage_key++; 179 180 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 181 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 182 183 kvm_s390_rewind_psw(vcpu, 4); 184 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); 185 return 0; 186 } 187 188 static int handle_ipte_interlock(struct kvm_vcpu *vcpu) 189 { 190 vcpu->stat.instruction_ipte_interlock++; 191 if (psw_bits(vcpu->arch.sie_block->gpsw).p) 192 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 193 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); 194 kvm_s390_rewind_psw(vcpu, 4); 195 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); 196 return 0; 197 } 198 199 static int handle_test_block(struct kvm_vcpu *vcpu) 200 { 201 gpa_t addr; 202 int reg2; 203 204 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 205 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 206 207 kvm_s390_get_regs_rre(vcpu, NULL, ®2); 208 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 209 addr = kvm_s390_logical_to_effective(vcpu, addr); 210 if (kvm_s390_check_low_addr_protection(vcpu, addr)) 211 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 212 addr = kvm_s390_real_to_abs(vcpu, addr); 213 214 if (kvm_is_error_gpa(vcpu->kvm, addr)) 215 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 216 /* 217 * We don't expect errors on modern systems, and do not care 218 * about storage keys (yet), so let's just clear the page. 219 */ 220 if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) 221 return -EFAULT; 222 kvm_s390_set_psw_cc(vcpu, 0); 223 vcpu->run->s.regs.gprs[0] = 0; 224 return 0; 225 } 226 227 static int handle_tpi(struct kvm_vcpu *vcpu) 228 { 229 struct kvm_s390_interrupt_info *inti; 230 unsigned long len; 231 u32 tpi_data[3]; 232 int cc, rc; 233 u64 addr; 234 235 rc = 0; 236 addr = kvm_s390_get_base_disp_s(vcpu); 237 if (addr & 3) 238 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 239 cc = 0; 240 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); 241 if (!inti) 242 goto no_interrupt; 243 cc = 1; 244 tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; 245 tpi_data[1] = inti->io.io_int_parm; 246 tpi_data[2] = inti->io.io_int_word; 247 if (addr) { 248 /* 249 * Store the two-word I/O interruption code into the 250 * provided area. 251 */ 252 len = sizeof(tpi_data) - 4; 253 rc = write_guest(vcpu, addr, &tpi_data, len); 254 if (rc) 255 return kvm_s390_inject_prog_cond(vcpu, rc); 256 } else { 257 /* 258 * Store the three-word I/O interruption code into 259 * the appropriate lowcore area. 260 */ 261 len = sizeof(tpi_data); 262 if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) 263 rc = -EFAULT; 264 } 265 /* 266 * If we encounter a problem storing the interruption code, the 267 * instruction is suppressed from the guest's view: reinject the 268 * interrupt. 269 */ 270 if (!rc) 271 kfree(inti); 272 else 273 kvm_s390_reinject_io_int(vcpu->kvm, inti); 274 no_interrupt: 275 /* Set condition code and we're done. */ 276 if (!rc) 277 kvm_s390_set_psw_cc(vcpu, cc); 278 return rc ? -EFAULT : 0; 279 } 280 281 static int handle_tsch(struct kvm_vcpu *vcpu) 282 { 283 struct kvm_s390_interrupt_info *inti; 284 285 inti = kvm_s390_get_io_int(vcpu->kvm, 0, 286 vcpu->run->s.regs.gprs[1]); 287 288 /* 289 * Prepare exit to userspace. 290 * We indicate whether we dequeued a pending I/O interrupt 291 * so that userspace can re-inject it if the instruction gets 292 * a program check. While this may re-order the pending I/O 293 * interrupts, this is no problem since the priority is kept 294 * intact. 295 */ 296 vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; 297 vcpu->run->s390_tsch.dequeued = !!inti; 298 if (inti) { 299 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; 300 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; 301 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; 302 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; 303 } 304 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; 305 kfree(inti); 306 return -EREMOTE; 307 } 308 309 static int handle_io_inst(struct kvm_vcpu *vcpu) 310 { 311 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); 312 313 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 314 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 315 316 if (vcpu->kvm->arch.css_support) { 317 /* 318 * Most I/O instructions will be handled by userspace. 319 * Exceptions are tpi and the interrupt portion of tsch. 320 */ 321 if (vcpu->arch.sie_block->ipa == 0xb236) 322 return handle_tpi(vcpu); 323 if (vcpu->arch.sie_block->ipa == 0xb235) 324 return handle_tsch(vcpu); 325 /* Handle in userspace. */ 326 return -EOPNOTSUPP; 327 } else { 328 /* 329 * Set condition code 3 to stop the guest from issuing channel 330 * I/O instructions. 331 */ 332 kvm_s390_set_psw_cc(vcpu, 3); 333 return 0; 334 } 335 } 336 337 static int handle_stfl(struct kvm_vcpu *vcpu) 338 { 339 int rc; 340 341 vcpu->stat.instruction_stfl++; 342 343 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 344 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 345 346 rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), 347 vfacilities, 4); 348 if (rc) 349 return rc; 350 VCPU_EVENT(vcpu, 5, "store facility list value %x", 351 *(unsigned int *) vfacilities); 352 trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); 353 return 0; 354 } 355 356 #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA) 357 #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL 358 #define PSW_ADDR_24 0x0000000000ffffffUL 359 #define PSW_ADDR_31 0x000000007fffffffUL 360 361 int is_valid_psw(psw_t *psw) 362 { 363 if (psw->mask & PSW_MASK_UNASSIGNED) 364 return 0; 365 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { 366 if (psw->addr & ~PSW_ADDR_31) 367 return 0; 368 } 369 if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24)) 370 return 0; 371 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) 372 return 0; 373 if (psw->addr & 1) 374 return 0; 375 return 1; 376 } 377 378 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) 379 { 380 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; 381 psw_compat_t new_psw; 382 u64 addr; 383 int rc; 384 385 if (gpsw->mask & PSW_MASK_PSTATE) 386 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 387 388 addr = kvm_s390_get_base_disp_s(vcpu); 389 if (addr & 7) 390 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 391 392 rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 393 if (rc) 394 return kvm_s390_inject_prog_cond(vcpu, rc); 395 if (!(new_psw.mask & PSW32_MASK_BASE)) 396 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 397 gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; 398 gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE; 399 gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE; 400 if (!is_valid_psw(gpsw)) 401 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 402 return 0; 403 } 404 405 static int handle_lpswe(struct kvm_vcpu *vcpu) 406 { 407 psw_t new_psw; 408 u64 addr; 409 int rc; 410 411 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 412 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 413 414 addr = kvm_s390_get_base_disp_s(vcpu); 415 if (addr & 7) 416 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 417 rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 418 if (rc) 419 return kvm_s390_inject_prog_cond(vcpu, rc); 420 vcpu->arch.sie_block->gpsw = new_psw; 421 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) 422 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 423 return 0; 424 } 425 426 static int handle_stidp(struct kvm_vcpu *vcpu) 427 { 428 u64 stidp_data = vcpu->arch.stidp_data; 429 u64 operand2; 430 int rc; 431 432 vcpu->stat.instruction_stidp++; 433 434 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 435 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 436 437 operand2 = kvm_s390_get_base_disp_s(vcpu); 438 439 if (operand2 & 7) 440 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 441 442 rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); 443 if (rc) 444 return kvm_s390_inject_prog_cond(vcpu, rc); 445 446 VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); 447 return 0; 448 } 449 450 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) 451 { 452 int cpus = 0; 453 int n; 454 455 cpus = atomic_read(&vcpu->kvm->online_vcpus); 456 457 /* deal with other level 3 hypervisors */ 458 if (stsi(mem, 3, 2, 2)) 459 mem->count = 0; 460 if (mem->count < 8) 461 mem->count++; 462 for (n = mem->count - 1; n > 0 ; n--) 463 memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); 464 465 mem->vm[0].cpus_total = cpus; 466 mem->vm[0].cpus_configured = cpus; 467 mem->vm[0].cpus_standby = 0; 468 mem->vm[0].cpus_reserved = 0; 469 mem->vm[0].caf = 1000; 470 memcpy(mem->vm[0].name, "KVMguest", 8); 471 ASCEBC(mem->vm[0].name, 8); 472 memcpy(mem->vm[0].cpi, "KVM/Linux ", 16); 473 ASCEBC(mem->vm[0].cpi, 16); 474 } 475 476 static int handle_stsi(struct kvm_vcpu *vcpu) 477 { 478 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; 479 int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; 480 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; 481 unsigned long mem = 0; 482 u64 operand2; 483 int rc = 0; 484 485 vcpu->stat.instruction_stsi++; 486 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); 487 488 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 489 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 490 491 if (fc > 3) { 492 kvm_s390_set_psw_cc(vcpu, 3); 493 return 0; 494 } 495 496 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 497 || vcpu->run->s.regs.gprs[1] & 0xffff0000) 498 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 499 500 if (fc == 0) { 501 vcpu->run->s.regs.gprs[0] = 3 << 28; 502 kvm_s390_set_psw_cc(vcpu, 0); 503 return 0; 504 } 505 506 operand2 = kvm_s390_get_base_disp_s(vcpu); 507 508 if (operand2 & 0xfff) 509 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 510 511 switch (fc) { 512 case 1: /* same handling for 1 and 2 */ 513 case 2: 514 mem = get_zeroed_page(GFP_KERNEL); 515 if (!mem) 516 goto out_no_data; 517 if (stsi((void *) mem, fc, sel1, sel2)) 518 goto out_no_data; 519 break; 520 case 3: 521 if (sel1 != 2 || sel2 != 2) 522 goto out_no_data; 523 mem = get_zeroed_page(GFP_KERNEL); 524 if (!mem) 525 goto out_no_data; 526 handle_stsi_3_2_2(vcpu, (void *) mem); 527 break; 528 } 529 530 rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); 531 if (rc) { 532 rc = kvm_s390_inject_prog_cond(vcpu, rc); 533 goto out; 534 } 535 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); 536 free_page(mem); 537 kvm_s390_set_psw_cc(vcpu, 0); 538 vcpu->run->s.regs.gprs[0] = 0; 539 return 0; 540 out_no_data: 541 kvm_s390_set_psw_cc(vcpu, 3); 542 out: 543 free_page(mem); 544 return rc; 545 } 546 547 static const intercept_handler_t b2_handlers[256] = { 548 [0x02] = handle_stidp, 549 [0x04] = handle_set_clock, 550 [0x10] = handle_set_prefix, 551 [0x11] = handle_store_prefix, 552 [0x12] = handle_store_cpu_address, 553 [0x21] = handle_ipte_interlock, 554 [0x29] = handle_skey, 555 [0x2a] = handle_skey, 556 [0x2b] = handle_skey, 557 [0x2c] = handle_test_block, 558 [0x30] = handle_io_inst, 559 [0x31] = handle_io_inst, 560 [0x32] = handle_io_inst, 561 [0x33] = handle_io_inst, 562 [0x34] = handle_io_inst, 563 [0x35] = handle_io_inst, 564 [0x36] = handle_io_inst, 565 [0x37] = handle_io_inst, 566 [0x38] = handle_io_inst, 567 [0x39] = handle_io_inst, 568 [0x3a] = handle_io_inst, 569 [0x3b] = handle_io_inst, 570 [0x3c] = handle_io_inst, 571 [0x50] = handle_ipte_interlock, 572 [0x5f] = handle_io_inst, 573 [0x74] = handle_io_inst, 574 [0x76] = handle_io_inst, 575 [0x7d] = handle_stsi, 576 [0xb1] = handle_stfl, 577 [0xb2] = handle_lpswe, 578 }; 579 580 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) 581 { 582 intercept_handler_t handler; 583 584 /* 585 * A lot of B2 instructions are priviledged. Here we check for 586 * the privileged ones, that we can handle in the kernel. 587 * Anything else goes to userspace. 588 */ 589 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 590 if (handler) 591 return handler(vcpu); 592 593 return -EOPNOTSUPP; 594 } 595 596 static int handle_epsw(struct kvm_vcpu *vcpu) 597 { 598 int reg1, reg2; 599 600 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 601 602 /* This basically extracts the mask half of the psw. */ 603 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; 604 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; 605 if (reg2) { 606 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; 607 vcpu->run->s.regs.gprs[reg2] |= 608 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; 609 } 610 return 0; 611 } 612 613 #define PFMF_RESERVED 0xfffc0101UL 614 #define PFMF_SK 0x00020000UL 615 #define PFMF_CF 0x00010000UL 616 #define PFMF_UI 0x00008000UL 617 #define PFMF_FSC 0x00007000UL 618 #define PFMF_NQ 0x00000800UL 619 #define PFMF_MR 0x00000400UL 620 #define PFMF_MC 0x00000200UL 621 #define PFMF_KEY 0x000000feUL 622 623 static int handle_pfmf(struct kvm_vcpu *vcpu) 624 { 625 int reg1, reg2; 626 unsigned long start, end; 627 628 vcpu->stat.instruction_pfmf++; 629 630 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 631 632 if (!MACHINE_HAS_PFMF) 633 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 634 635 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 636 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 637 638 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) 639 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 640 641 /* Only provide non-quiescing support if the host supports it */ 642 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14)) 643 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 644 645 /* No support for conditional-SSKE */ 646 if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC)) 647 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 648 649 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 650 start = kvm_s390_logical_to_effective(vcpu, start); 651 652 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 653 case 0x00000000: 654 end = (start + (1UL << 12)) & ~((1UL << 12) - 1); 655 break; 656 case 0x00001000: 657 end = (start + (1UL << 20)) & ~((1UL << 20) - 1); 658 break; 659 /* We dont support EDAT2 660 case 0x00002000: 661 end = (start + (1UL << 31)) & ~((1UL << 31) - 1); 662 break;*/ 663 default: 664 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 665 } 666 667 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 668 if (kvm_s390_check_low_addr_protection(vcpu, start)) 669 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 670 } 671 672 while (start < end) { 673 unsigned long useraddr, abs_addr; 674 675 /* Translate guest address to host address */ 676 if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0) 677 abs_addr = kvm_s390_real_to_abs(vcpu, start); 678 else 679 abs_addr = start; 680 useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr)); 681 if (kvm_is_error_hva(useraddr)) 682 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 683 684 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 685 if (clear_user((void __user *)useraddr, PAGE_SIZE)) 686 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 687 } 688 689 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { 690 int rc = __skey_check_enable(vcpu); 691 692 if (rc) 693 return rc; 694 if (set_guest_storage_key(current->mm, useraddr, 695 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, 696 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) 697 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 698 } 699 700 start += PAGE_SIZE; 701 } 702 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) 703 vcpu->run->s.regs.gprs[reg2] = end; 704 return 0; 705 } 706 707 static int handle_essa(struct kvm_vcpu *vcpu) 708 { 709 /* entries expected to be 1FF */ 710 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; 711 unsigned long *cbrlo, cbrle; 712 struct gmap *gmap; 713 int i; 714 715 VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); 716 gmap = vcpu->arch.gmap; 717 vcpu->stat.instruction_essa++; 718 if (!kvm_s390_cmma_enabled(vcpu->kvm)) 719 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 720 721 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 722 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 723 724 if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6) 725 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 726 727 /* Rewind PSW to repeat the ESSA instruction */ 728 kvm_s390_rewind_psw(vcpu, 4); 729 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ 730 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); 731 down_read(&gmap->mm->mmap_sem); 732 for (i = 0; i < entries; ++i) { 733 cbrle = cbrlo[i]; 734 if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE)) 735 /* invalid entry */ 736 break; 737 /* try to free backing */ 738 __gmap_zap(gmap, cbrle); 739 } 740 up_read(&gmap->mm->mmap_sem); 741 if (i < entries) 742 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 743 return 0; 744 } 745 746 static const intercept_handler_t b9_handlers[256] = { 747 [0x8a] = handle_ipte_interlock, 748 [0x8d] = handle_epsw, 749 [0x8e] = handle_ipte_interlock, 750 [0x8f] = handle_ipte_interlock, 751 [0xab] = handle_essa, 752 [0xaf] = handle_pfmf, 753 }; 754 755 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) 756 { 757 intercept_handler_t handler; 758 759 /* This is handled just as for the B2 instructions. */ 760 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 761 if (handler) 762 return handler(vcpu); 763 764 return -EOPNOTSUPP; 765 } 766 767 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) 768 { 769 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 770 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 771 int reg, rc, nr_regs; 772 u32 ctl_array[16]; 773 u64 ga; 774 775 vcpu->stat.instruction_lctl++; 776 777 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 778 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 779 780 ga = kvm_s390_get_base_disp_rs(vcpu); 781 782 if (ga & 3) 783 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 784 785 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 786 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); 787 788 nr_regs = ((reg3 - reg1) & 0xf) + 1; 789 rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); 790 if (rc) 791 return kvm_s390_inject_prog_cond(vcpu, rc); 792 reg = reg1; 793 nr_regs = 0; 794 do { 795 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; 796 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; 797 if (reg == reg3) 798 break; 799 reg = (reg + 1) % 16; 800 } while (1); 801 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 802 return 0; 803 } 804 805 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) 806 { 807 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 808 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 809 int reg, rc, nr_regs; 810 u32 ctl_array[16]; 811 u64 ga; 812 813 vcpu->stat.instruction_stctl++; 814 815 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 816 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 817 818 ga = kvm_s390_get_base_disp_rs(vcpu); 819 820 if (ga & 3) 821 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 822 823 VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 824 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); 825 826 reg = reg1; 827 nr_regs = 0; 828 do { 829 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; 830 if (reg == reg3) 831 break; 832 reg = (reg + 1) % 16; 833 } while (1); 834 rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); 835 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; 836 } 837 838 static int handle_lctlg(struct kvm_vcpu *vcpu) 839 { 840 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 841 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 842 int reg, rc, nr_regs; 843 u64 ctl_array[16]; 844 u64 ga; 845 846 vcpu->stat.instruction_lctlg++; 847 848 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 849 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 850 851 ga = kvm_s390_get_base_disp_rsy(vcpu); 852 853 if (ga & 7) 854 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 855 856 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 857 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); 858 859 nr_regs = ((reg3 - reg1) & 0xf) + 1; 860 rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); 861 if (rc) 862 return kvm_s390_inject_prog_cond(vcpu, rc); 863 reg = reg1; 864 nr_regs = 0; 865 do { 866 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; 867 if (reg == reg3) 868 break; 869 reg = (reg + 1) % 16; 870 } while (1); 871 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 872 return 0; 873 } 874 875 static int handle_stctg(struct kvm_vcpu *vcpu) 876 { 877 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 878 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 879 int reg, rc, nr_regs; 880 u64 ctl_array[16]; 881 u64 ga; 882 883 vcpu->stat.instruction_stctg++; 884 885 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 886 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 887 888 ga = kvm_s390_get_base_disp_rsy(vcpu); 889 890 if (ga & 7) 891 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 892 893 VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 894 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); 895 896 reg = reg1; 897 nr_regs = 0; 898 do { 899 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; 900 if (reg == reg3) 901 break; 902 reg = (reg + 1) % 16; 903 } while (1); 904 rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); 905 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; 906 } 907 908 static const intercept_handler_t eb_handlers[256] = { 909 [0x2f] = handle_lctlg, 910 [0x25] = handle_stctg, 911 }; 912 913 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) 914 { 915 intercept_handler_t handler; 916 917 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; 918 if (handler) 919 return handler(vcpu); 920 return -EOPNOTSUPP; 921 } 922 923 static int handle_tprot(struct kvm_vcpu *vcpu) 924 { 925 u64 address1, address2; 926 unsigned long hva, gpa; 927 int ret = 0, cc = 0; 928 bool writable; 929 930 vcpu->stat.instruction_tprot++; 931 932 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 933 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 934 935 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); 936 937 /* we only handle the Linux memory detection case: 938 * access key == 0 939 * everything else goes to userspace. */ 940 if (address2 & 0xf0) 941 return -EOPNOTSUPP; 942 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 943 ipte_lock(vcpu); 944 ret = guest_translate_address(vcpu, address1, &gpa, 1); 945 if (ret == PGM_PROTECTION) { 946 /* Write protected? Try again with read-only... */ 947 cc = 1; 948 ret = guest_translate_address(vcpu, address1, &gpa, 0); 949 } 950 if (ret) { 951 if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { 952 ret = kvm_s390_inject_program_int(vcpu, ret); 953 } else if (ret > 0) { 954 /* Translation not available */ 955 kvm_s390_set_psw_cc(vcpu, 3); 956 ret = 0; 957 } 958 goto out_unlock; 959 } 960 961 hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); 962 if (kvm_is_error_hva(hva)) { 963 ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 964 } else { 965 if (!writable) 966 cc = 1; /* Write not permitted ==> read-only */ 967 kvm_s390_set_psw_cc(vcpu, cc); 968 /* Note: CC2 only occurs for storage keys (not supported yet) */ 969 } 970 out_unlock: 971 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 972 ipte_unlock(vcpu); 973 return ret; 974 } 975 976 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) 977 { 978 /* For e5xx... instructions we only handle TPROT */ 979 if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) 980 return handle_tprot(vcpu); 981 return -EOPNOTSUPP; 982 } 983 984 static int handle_sckpf(struct kvm_vcpu *vcpu) 985 { 986 u32 value; 987 988 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 989 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 990 991 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) 992 return kvm_s390_inject_program_int(vcpu, 993 PGM_SPECIFICATION); 994 995 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; 996 vcpu->arch.sie_block->todpr = value; 997 998 return 0; 999 } 1000 1001 static const intercept_handler_t x01_handlers[256] = { 1002 [0x07] = handle_sckpf, 1003 }; 1004 1005 int kvm_s390_handle_01(struct kvm_vcpu *vcpu) 1006 { 1007 intercept_handler_t handler; 1008 1009 handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 1010 if (handler) 1011 return handler(vcpu); 1012 return -EOPNOTSUPP; 1013 } 1014