/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	ar_t ar;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
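/*
 * Annotation on the epoch arithmetic above (a reading of the code, not
 * part of the original comments): the guest observes host_tod + epoch,
 * so storing "val - hostclk" into every vcpu's epoch field makes all
 * guest CPUs see the requested clock value.  The low six bits are
 * masked off, presumably because the epoch difference only takes
 * effect at that resolution.
 */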
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	int rc = __skey_check_enable(vcpu);

	if (rc)
		return rc;
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}
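/*
 * Annotation on handle_test_block() above (not part of the original
 * source): guest addresses pass through several views here.  The
 * register content is a logical address, kvm_s390_logical_to_effective()
 * reduces it according to the addressing mode, the low-address
 * protection check operates on the resulting real address, and
 * kvm_s390_real_to_abs() applies prefixing to obtain the absolute
 * address that kvm_clear_guest() finally works on.
 */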
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	ar_t ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
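/*
 * Annotation (not part of the original source): TPI and TSCH are the
 * two I/O instructions partially handled in the kernel when userspace
 * implements the channel subsystem.  handle_tpi() stores either the
 * two-word interruption code at the operand address or, for an operand
 * of zero, the three-word code in the lowcore.  handle_tsch() only
 * dequeues a matching interrupt and exits to userspace with
 * KVM_EXIT_S390_TSCH (-EREMOTE); the actual TEST SUBCHANNEL is
 * performed by userspace.
 */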
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac->list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
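/*
 * Annotation (not part of the original source): LPSW loads a 64-bit
 * ESA/390-format PSW, which kvm_s390_handle_lpsw() widens into the
 * 128-bit z/Architecture guest PSW: the 32-bit compat mask is shifted
 * into the high half of the new mask (with the base bit cleared), and
 * the AMODE bit is carried over from the compat address word.
 * is_valid_psw() then rejects unassigned mask bits and addresses that
 * exceed the selected addressing mode.
 */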
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}
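/*
 * Annotation (not part of the original source): for STSI 3.2.2 the
 * host's own answer is queried first ("deal with other level 3
 * hypervisors") and the KVM entry is then prepended as vm[0], shifting
 * the existing entries up and growing count up to the architected
 * maximum of 8.  The caf value of 1000 appears to report an unadjusted
 * (100%) capability factor.
 */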
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	ar_t ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
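/*
 * Annotation (not part of the original source): the dispatch pattern
 * used throughout this file is a sparse 256-entry function-pointer
 * table indexed by the low byte of the intercepted instruction (taken
 * from the ipa or ipb field of the SIE block).  A NULL slot means the
 * instruction is not emulated here, and -EOPNOTSUPP hands the exit on
 * to userspace.
 */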
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}
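/*
 * Annotation (not part of the original source): PFMF operates on one
 * frame of 4K, 1M or 2G according to the frame-size code, but the
 * emulation above always walks it in 4K steps, clearing each page via
 * clear_user() on the host mapping and setting storage keys page by
 * page.  reg2 is set to "end" for a non-zero frame-size code,
 * apparently because the instruction is defined to leave the address
 * of the next frame in the operand register for the larger frame
 * sizes.
 */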
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	kvm_s390_rewind_psw(vcpu, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(gmap, cbrle);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
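/*
 * Annotation (not part of the original source): ESSA is the core of
 * collaborative memory management (CMMA).  Hardware collects the
 * guest-discarded page addresses in the CBRL buffer and intercepts
 * once it fills up; handle_essa() walks the buffer, zaps the host
 * backing of each listed page, resets the buffer, and rewinds the PSW
 * so the guest re-executes ESSA under SIE.
 */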
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
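/*
 * Annotation (not part of the original source): LCTL/STCTL, and their
 * 64-bit LCTLG/STCTG variants below, name a register range r1..r3 that
 * wraps modulo 16, hence "((reg3 - reg1) & 0xf) + 1" registers are
 * transferred and the copy loops step with "(reg + 1) % 16".  The
 * 32-bit forms touch only the lower word of each control register, and
 * a TLB flush is requested after loading since control registers steer
 * address translation.
 */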
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	ar_t ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/*
	 * we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace.
	 */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}
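/*
 * Annotation (not part of the original source): the TPROT condition
 * codes map as follows in this emulation: CC0 fetch and store
 * permitted; CC1 fetch permitted but store not, set when either the
 * guest translation or the host mapping is write protected; CC3
 * translation not available.  CC2 would report key-controlled
 * protection, which is not modelled since only access key 0 is handled
 * here.
 */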
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}