/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	ar_t ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
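
/* Handle SPX (SET PREFIX) interception */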
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	int rc = __skey_check_enable(vcpu);

	if (rc)
		return rc;
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}
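
/* Handle TB (TEST BLOCK) interception */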
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	ar_t ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}
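
/* Handle TSCH (TEST SUBCHANNEL) by preparing an exit to userspace */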
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
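
/* Handle LPSWE (LOAD PSW EXTENDED) interception */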
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	/* pad the cpi string to the full 16 bytes that are copied */
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}
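
/* Handle STSI (STORE SYSTEM INFORMATION) interception */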
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	ar_t ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel;
	 * anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
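
/* Handle EPSW (EXTRACT PSW) interception */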
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK       0x00020000UL
#define PFMF_CF       0x00010000UL
#define PFMF_UI       0x00008000UL
#define PFMF_FSC      0x00007000UL
#define PFMF_NQ       0x00000800UL
#define PFMF_MR       0x00000400UL
#define PFMF_MC       0x00000200UL
#define PFMF_KEY      0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}
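
/*
 * Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) for CMMA-enabled
 * guests: the instruction is retried via the SIE block while the unused
 * pages listed in the CBRL area are zapped from the host mappings.
 */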
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
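
/* Handle STCTL (STORE CONTROL) interception */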
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
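
/* Handle TPROT (TEST PROTECTION) for the Linux memory-detection case */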
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	ar_t ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;	/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}