/*
 * S/390 misc helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care as it is up to the OS when to process that
     * interrupt and reset to > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

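/*
 * The architected TOD clock counts from the epoch 1900-01-01 00:00:00 UTC,
 * with bit 51 representing one microsecond. The user-only implementation
 * below therefore converts a host CLOCK_REALTIME reading (Unix epoch,
 * nanoseconds) with time2tod() and adds TOD_UNIX_EPOCH, the TOD value of
 * the Unix epoch.
 */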
/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}

#ifndef CONFIG_USER_ONLY
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    qemu_mutex_unlock_iothread();
    if (r < 0) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
    return r;
}

void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3, GETPC());
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    CPUState *cs = env_cpu(env);
    uint32_t prefix = a1 & 0x7fffe000;

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
}

static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(qemu_mutex_iothread_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, time);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    qemu_mutex_lock_iothread();
    update_ckc_timer(env);
    qemu_mutex_unlock_iothread();
}

void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    S390CPU *cpu = S390_CPU(cs);

    update_ckc_timer(&cpu->env);
}

/* Set Clock */
uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
{
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod = {
        .high = 0,
        .low = tod_low,
    };

    qemu_mutex_lock_iothread();
    tdc->set(td, &tod, &error_abort);
    qemu_mutex_unlock_iothread();
    return 0;
}

/* Set Tod Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

    if (val & 0xffff0000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

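/*
 * STORE SYSTEM INFORMATION: the function code and selector 1 come from
 * general register 0, selector 2 from general register 1; together they
 * pick the SYSIB that is stored at the operand address. Combinations we
 * do not emulate simply return cc 3.
 */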
/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    if (a0 & ~TARGET_PAGE_MASK) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have a sclp linemode console (which is always present on
     * LPAR, but not the default for QEMU), therefore not displaying boot
     * messages and making booting a Linux kernel under TCG harder.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     *       and change defaults (linemode console) based on machine type
     *       and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strpadcpy((char *)sysib.sysib_322.ext_names[0],
                          sizeof(sysib.sysib_322.ext_names[0]),
                          qemu_name, '\0');

            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}

uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

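/*
 * Channel and I/O instruction helpers: each of the helpers below forwards
 * to the common ioinst_handle_*() / FLIC code, taking the iothread mutex
 * (BQL) around the call.
 */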
#ifndef CONFIG_USER_ONLY
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_sal(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

    if (addr & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    qemu_mutex_lock_iothread();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        qemu_mutex_unlock_iothread();
        return 0;
    }

    if (addr) {
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            qemu_mutex_unlock_iothread();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    qemu_mutex_unlock_iothread();
    return 1;
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif

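/*
 * Program-Event Recording (PER): control register 9 holds the event masks
 * (branch, instruction fetch, storage alteration), while control registers
 * 10 and 11 delimit the monitored address range. The helpers below latch a
 * pending event into env->per_address / env->per_perc_atmid so that
 * per_check_exception() can deliver the PER program interruption.
 */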
#ifndef CONFIG_USER_ONLY
void HELPER(per_check_exception)(CPUS390XState *env)
{
    if (env->per_perc_atmid) {
        tcg_s390_program_interrupt(env, PGM_PER, GETPC());
    }
}

/* Check if an address is within the PER starting address and the PER
   ending address. The address range might loop. */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = env_cpu(env);

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}

void HELPER(per_store_real)(CPUS390XState *env)
{
    if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
#endif

static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care, the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

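/*
 * STORE FACILITY LIST EXTENDED: per the PoP, bits 56-63 of general
 * register 0 contain one less than the number of doublewords provided at
 * the operand address; on completion they are replaced with one less than
 * the number of doublewords needed for all facility bits. cc 0 means the
 * complete list was stored, cc 3 means the provided area was too small.
 */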
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    if (addr & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored. However, existing hardware appears to
     * not store the words, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}

#ifndef CONFIG_USER_ONLY
/*
 * Note: we ignore any return code of the functions called for the pci
 * instructions, as the only time they return !0 is when the stub is
 * called, and in that case we didn't even offer the zpci facility.
 * The only exception is SIC, where program checks need to be handled
 * by the caller.
 */
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    clp_service_call(cpu, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcilg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                     uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    int r;

    qemu_mutex_lock_iothread();
    r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
    qemu_mutex_unlock_iothread();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        tcg_s390_program_interrupt(env, -r, GETPC());
    }
}

void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    rpcit_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
                    uint64_t gaddr, uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                    uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif