/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}
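/*
 * ISKE (0xb229), RRBE (0xb22a) and SSKE (0xb22b) are not emulated below.
 * All three are 4-byte instructions, so stepping the PSW back by 4 makes
 * the guest re-execute the intercepted instruction itself; presumably the
 * retry then succeeds under SIE once the host side has faulted in the
 * operand page.  In effect:
 *
 *	gpsw.addr = __rewind_psw(gpsw, 4);	<- point back at the key op
 *	return 0;				<- resume the guest, retry
 */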
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
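/*
 * The condition code occupies PSW bits 18-19, i.e. bits 44-45 counted
 * from the least-significant end of the 64-bit mask half.  Hence the
 * idiom used throughout this file:
 *
 *	gpsw.mask &= ~(3ul << 44);		clear the old cc
 *	gpsw.mask |= (cc & 3ul) << 44;		insert the new cc
 *
 * Setting cc 3, for example, ORs 0x0000300000000000 into the mask.
 */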
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
	trace_kvm_s390_handle_stfl(vcpu, facility_list);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw) {
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}
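/*
 * Worked example for the LPSW widening above (constants per the compat
 * definitions in <asm/compat.h>): a short PSW with only the base bit set
 * in the mask and the 31-bit amode bit set in the address,
 *
 *	new_psw.mask = 0x00080000 (PSW32_MASK_BASE)
 *	new_psw.addr = 0x80001000 (PSW32_ADDR_AMODE | 0x1000)
 *
 * becomes the z/Architecture PSW
 *
 *	gpsw->mask = 0x0000000080000000 (PSW_MASK_BA, 31-bit mode)
 *	gpsw->addr = 0x0000000000001000
 *
 * which then passes is_valid_psw().
 */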
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;	  /* cc 3 */
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);  /* cc 0 */
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
out_exception:
	free_page(mem);
	return rc;
}
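/*
 * All instructions handled below share the 0xb2xx opcode space; the SIE
 * block's IPA field holds that halfword, so the low opcode byte indexes
 * directly into this 256-entry table (STSI is 0xb27d, hence index 0x7d).
 */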
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel;
	 * anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}
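/*
 * Layout of the PFMF first-operand register as decoded by the masks
 * below: FSC selects the frame size (0 = 4K, 1 = 1M, 2 = 2G with EDAT2),
 * SK/CF/UI request set-key, clear-frame and usage-indication, NQ/MR/MC
 * modify the key-setting operation, and KEY is the storage key itself.
 * For example, gprs[reg1] = 0x00030006 requests clear-frame plus set-key
 * with key 0x06 on 4K frames.
 */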
#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK       0x00020000UL
#define PFMF_CF       0x00010000UL
#define PFMF_UI       0x00008000UL
#define PFMF_FSC      0x00007000UL
#define PFMF_NQ       0x00000800UL
#define PFMF_MR       0x00000400UL
#define PFMF_MC       0x00000200UL
#define PFMF_KEY      0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !(S390_lowcore.stfl_fac_list & 0x00020000))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
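/*
 * TPROT sets the condition code according to the access allowed at the
 * first-operand address under the access key from the second operand:
 * cc 0 fetch and store permitted, cc 1 fetch only, cc 2 neither, cc 3
 * address not translatable.  Since only the DAT-off case is handled
 * here, cc 3 cannot arise; an unmapped guest address is reported as an
 * addressing exception instead.  The answer is approximated from the
 * host VMA flags: VM_WRITE yields cc 0, VM_READ alone cc 1, neither cc 2.
 */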
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}