/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	ar_t ar;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

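/*
 * Handle SPX (SET PREFIX). Roughly: fetch the word-aligned second operand,
 * mask it down to a valid 8k-aligned prefix origin, make sure the new
 * prefix area is backed by guest memory, and only then latch it.
 */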
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	int rc = __skey_check_enable(vcpu);

	if (rc)
		return rc;
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

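/*
 * Handle TB (TEST BLOCK). Note that on cc 0 the architecture expects the
 * addressed 4k block to have been set to zeros, which is why the handler
 * below clears the whole page rather than just probing it.
 */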
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	ar_t ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

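/*
 * TSCH itself is emulated in userspace (see handle_io_inst below); the
 * in-kernel part only dequeues a matching pending I/O interrupt and
 * exports it through the kvm_run block, on the assumption that userspace
 * re-injects it if the instruction turns into a program check.
 */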
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac->list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

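/*
 * Handle LPSWE (LOAD PSW EXTENDED). Unlike LPSW above, the operand is
 * already a full 16-byte z/Architecture PSW, so no format conversion is
 * needed; we load it and then validate it.
 */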
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

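/*
 * Handle STSI (STORE SYSTEM INFORMATION). The function code in bits 32-35
 * of r0 selects the level: 0 queries the current level (we report 3, i.e.
 * a level-3 hypervisor), 1 and 2 are satisfied from the host's own STSI
 * data, and 3-2-2 is patched up in handle_stsi_3_2_2() above to describe
 * this KVM guest.
 */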
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	ar_t ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

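/*
 * Note that there is no privilege check here: EPSW (EXTRACT PSW) may also
 * be issued from the problem state. The handler simply copies the two PSW
 * mask halves into the low words of r1 and, if the r2 field is nonzero,
 * r2.
 */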
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* 4k frames */
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		/* 1m frames */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/*
	 * We don't support EDAT2 (2g frames):
	 * case 0x00002000:
	 *	end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
	 *	break;
	 */
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

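/*
 * Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES), the CMMA primitive.
 * Roughly: the guest frames released via ESSA have been logged into the
 * block-release list anchored at cbrlo; we walk that list, zap the host
 * mappings backing those frames, and rewind the PSW so the guest
 * re-executes the ESSA instruction itself under SIE.
 */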
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	kvm_s390_rewind_psw(vcpu, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(gmap, cbrle);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

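/*
 * As in LCTL above, the r1-r3 register range of the load/store control
 * instructions wraps modulo 16: e.g. "stctl %c14,%c1,..." covers control
 * registers 14, 15, 0 and 1, which is what the ((reg3 - reg1) & 0xf) + 1
 * count and the (reg + 1) % 16 stepping encode.
 */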
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

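/*
 * Handle TPROT (TEST PROTECTION) for access key 0. The condition codes
 * roughly follow the architecture: cc 0 means fetch and store are both
 * permitted, cc 1 means the location is write-protected (fetch only),
 * and cc 3 means the address could not be translated; cc 2 would only
 * occur for storage-key mismatches, which are not emulated here.
 */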
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	ar_t ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}