1 /* 2 * S/390 virtual CPU header 3 * 4 * Copyright (c) 2009 Ulrich Hecht 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * Contributions after 2012-10-29 are licensed under the terms of the 17 * GNU GPL, version 2 or (at your option) any later version. 18 * 19 * You should have received a copy of the GNU (Lesser) General Public 20 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 21 */ 22 23 #ifndef S390X_CPU_H 24 #define S390X_CPU_H 25 26 #include "qemu-common.h" 27 #include "cpu-qom.h" 28 29 #define TARGET_LONG_BITS 64 30 31 #define ELF_MACHINE_UNAME "S390X" 32 33 #define CPUArchState struct CPUS390XState 34 35 #include "exec/cpu-defs.h" 36 #define TARGET_PAGE_BITS 12 37 38 #define TARGET_PHYS_ADDR_SPACE_BITS 64 39 #define TARGET_VIRT_ADDR_SPACE_BITS 64 40 41 #include "exec/cpu-all.h" 42 43 #include "fpu/softfloat.h" 44 45 #define NB_MMU_MODES 3 46 #define TARGET_INSN_START_EXTRA_WORDS 1 47 48 #define MMU_MODE0_SUFFIX _primary 49 #define MMU_MODE1_SUFFIX _secondary 50 #define MMU_MODE2_SUFFIX _home 51 52 #define MMU_USER_IDX 0 53 54 #define MAX_EXT_QUEUE 16 55 #define MAX_IO_QUEUE 16 56 #define MAX_MCHK_QUEUE 16 57 58 #define PSW_MCHK_MASK 0x0004000000000000 59 #define PSW_IO_MASK 0x0200000000000000 60 61 typedef struct PSW { 62 uint64_t mask; 63 uint64_t addr; 64 } PSW; 65 66 typedef struct ExtQueue { 67 uint32_t code; 68 uint32_t param; 69 uint32_t param64; 70 } ExtQueue; 71 72 typedef struct IOIntQueue { 73 uint16_t id; 74 uint16_t nr; 75 uint32_t parm; 76 uint32_t word; 77 } IOIntQueue; 78 79 typedef struct MchkQueue { 80 uint16_t type; 81 } MchkQueue; 82 83 typedef struct CPUS390XState { 84 uint64_t regs[16]; /* GP registers */ 85 /* 86 * The floating point registers are part of the vector registers. 87 * vregs[0][0] -> vregs[15][0] are 16 floating point registers 88 */ 89 CPU_DoubleU vregs[32][2]; /* vector registers */ 90 uint32_t aregs[16]; /* access registers */ 91 uint8_t riccb[64]; /* runtime instrumentation control */ 92 93 /* Fields up to this point are not cleared by initial CPU reset */ 94 struct {} start_initial_reset_fields; 95 96 uint32_t fpc; /* floating-point control register */ 97 uint32_t cc_op; 98 99 float_status fpu_status; /* passed to softfloat lib */ 100 101 /* The low part of a 128-bit return, or remainder of a divide. */ 102 uint64_t retxl; 103 104 PSW psw; 105 106 uint64_t cc_src; 107 uint64_t cc_dst; 108 uint64_t cc_vr; 109 110 uint64_t ex_value; 111 112 uint64_t __excp_addr; 113 uint64_t psa; 114 115 uint32_t int_pgm_code; 116 uint32_t int_pgm_ilen; 117 118 uint32_t int_svc_code; 119 uint32_t int_svc_ilen; 120 121 uint64_t per_address; 122 uint16_t per_perc_atmid; 123 124 uint64_t cregs[16]; /* control registers */ 125 126 ExtQueue ext_queue[MAX_EXT_QUEUE]; 127 IOIntQueue io_queue[MAX_IO_QUEUE][8]; 128 MchkQueue mchk_queue[MAX_MCHK_QUEUE]; 129 130 int pending_int; 131 int ext_index; 132 int io_index[8]; 133 int mchk_index; 134 135 uint64_t ckc; 136 uint64_t cputm; 137 uint32_t todpr; 138 139 uint64_t pfault_token; 140 uint64_t pfault_compare; 141 uint64_t pfault_select; 142 143 uint64_t gbea; 144 uint64_t pp; 145 146 /* Fields up to this point are cleared by a CPU reset */ 147 struct {} end_reset_fields; 148 149 CPU_COMMON 150 151 uint32_t cpu_num; 152 uint32_t machine_type; 153 154 uint64_t tod_offset; 155 uint64_t tod_basetime; 156 QEMUTimer *tod_timer; 157 158 QEMUTimer *cpu_timer; 159 160 /* 161 * The cpu state represents the logical state of a cpu. In contrast to other 162 * architectures, there is a difference between a halt and a stop on s390. 163 * If all cpus are either stopped (including check stop) or in the disabled 164 * wait state, the vm can be shut down. 165 */ 166 #define CPU_STATE_UNINITIALIZED 0x00 167 #define CPU_STATE_STOPPED 0x01 168 #define CPU_STATE_CHECK_STOP 0x02 169 #define CPU_STATE_OPERATING 0x03 170 #define CPU_STATE_LOAD 0x04 171 uint8_t cpu_state; 172 173 /* currently processed sigp order */ 174 uint8_t sigp_order; 175 176 } CPUS390XState; 177 178 static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr) 179 { 180 return &cs->vregs[nr][0]; 181 } 182 183 /** 184 * S390CPU: 185 * @env: #CPUS390XState. 186 * 187 * An S/390 CPU. 188 */ 189 struct S390CPU { 190 /*< private >*/ 191 CPUState parent_obj; 192 /*< public >*/ 193 194 CPUS390XState env; 195 int64_t id; 196 S390CPUModel *model; 197 /* needed for live migration */ 198 void *irqstate; 199 uint32_t irqstate_saved_size; 200 }; 201 202 static inline S390CPU *s390_env_get_cpu(CPUS390XState *env) 203 { 204 return container_of(env, S390CPU, env); 205 } 206 207 #define ENV_GET_CPU(e) CPU(s390_env_get_cpu(e)) 208 209 #define ENV_OFFSET offsetof(S390CPU, env) 210 211 #ifndef CONFIG_USER_ONLY 212 extern const struct VMStateDescription vmstate_s390_cpu; 213 #endif 214 215 void s390_cpu_do_interrupt(CPUState *cpu); 216 bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); 217 void s390_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, 218 int flags); 219 int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, 220 int cpuid, void *opaque); 221 222 hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); 223 hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr); 224 int s390_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); 225 int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 226 void s390_cpu_gdb_init(CPUState *cs); 227 void s390x_cpu_debug_excp_handler(CPUState *cs); 228 229 #include "sysemu/kvm.h" 230 231 /* distinguish between 24 bit and 31 bit addressing */ 232 #define HIGH_ORDER_BIT 0x80000000 233 234 /* Interrupt Codes */ 235 /* Program Interrupts */ 236 #define PGM_OPERATION 0x0001 237 #define PGM_PRIVILEGED 0x0002 238 #define PGM_EXECUTE 0x0003 239 #define PGM_PROTECTION 0x0004 240 #define PGM_ADDRESSING 0x0005 241 #define PGM_SPECIFICATION 0x0006 242 #define PGM_DATA 0x0007 243 #define PGM_FIXPT_OVERFLOW 0x0008 244 #define PGM_FIXPT_DIVIDE 0x0009 245 #define PGM_DEC_OVERFLOW 0x000a 246 #define PGM_DEC_DIVIDE 0x000b 247 #define PGM_HFP_EXP_OVERFLOW 0x000c 248 #define PGM_HFP_EXP_UNDERFLOW 0x000d 249 #define PGM_HFP_SIGNIFICANCE 0x000e 250 #define PGM_HFP_DIVIDE 0x000f 251 #define PGM_SEGMENT_TRANS 0x0010 252 #define PGM_PAGE_TRANS 0x0011 253 #define PGM_TRANS_SPEC 0x0012 254 #define PGM_SPECIAL_OP 0x0013 255 #define PGM_OPERAND 0x0015 256 #define PGM_TRACE_TABLE 0x0016 257 #define PGM_SPACE_SWITCH 0x001c 258 #define PGM_HFP_SQRT 0x001d 259 #define PGM_PC_TRANS_SPEC 0x001f 260 #define PGM_AFX_TRANS 0x0020 261 #define PGM_ASX_TRANS 0x0021 262 #define PGM_LX_TRANS 0x0022 263 #define PGM_EX_TRANS 0x0023 264 #define PGM_PRIM_AUTH 0x0024 265 #define PGM_SEC_AUTH 0x0025 266 #define PGM_ALET_SPEC 0x0028 267 #define PGM_ALEN_SPEC 0x0029 268 #define PGM_ALE_SEQ 0x002a 269 #define PGM_ASTE_VALID 0x002b 270 #define PGM_ASTE_SEQ 0x002c 271 #define PGM_EXT_AUTH 0x002d 272 #define PGM_STACK_FULL 0x0030 273 #define PGM_STACK_EMPTY 0x0031 274 #define PGM_STACK_SPEC 0x0032 275 #define PGM_STACK_TYPE 0x0033 276 #define PGM_STACK_OP 0x0034 277 #define PGM_ASCE_TYPE 0x0038 278 #define PGM_REG_FIRST_TRANS 0x0039 279 #define PGM_REG_SEC_TRANS 0x003a 280 #define PGM_REG_THIRD_TRANS 0x003b 281 #define PGM_MONITOR 0x0040 282 #define PGM_PER 0x0080 283 #define PGM_CRYPTO 0x0119 284 285 /* External Interrupts */ 286 #define EXT_INTERRUPT_KEY 0x0040 287 #define EXT_CLOCK_COMP 0x1004 288 #define EXT_CPU_TIMER 0x1005 289 #define EXT_MALFUNCTION 0x1200 290 #define EXT_EMERGENCY 0x1201 291 #define EXT_EXTERNAL_CALL 0x1202 292 #define EXT_ETR 0x1406 293 #define EXT_SERVICE 0x2401 294 #define EXT_VIRTIO 0x2603 295 296 /* PSW defines */ 297 #undef PSW_MASK_PER 298 #undef PSW_MASK_DAT 299 #undef PSW_MASK_IO 300 #undef PSW_MASK_EXT 301 #undef PSW_MASK_KEY 302 #undef PSW_SHIFT_KEY 303 #undef PSW_MASK_MCHECK 304 #undef PSW_MASK_WAIT 305 #undef PSW_MASK_PSTATE 306 #undef PSW_MASK_ASC 307 #undef PSW_MASK_CC 308 #undef PSW_MASK_PM 309 #undef PSW_MASK_64 310 #undef PSW_MASK_32 311 #undef PSW_MASK_ESA_ADDR 312 313 #define PSW_MASK_PER 0x4000000000000000ULL 314 #define PSW_MASK_DAT 0x0400000000000000ULL 315 #define PSW_MASK_IO 0x0200000000000000ULL 316 #define PSW_MASK_EXT 0x0100000000000000ULL 317 #define PSW_MASK_KEY 0x00F0000000000000ULL 318 #define PSW_SHIFT_KEY 56 319 #define PSW_MASK_MCHECK 0x0004000000000000ULL 320 #define PSW_MASK_WAIT 0x0002000000000000ULL 321 #define PSW_MASK_PSTATE 0x0001000000000000ULL 322 #define PSW_MASK_ASC 0x0000C00000000000ULL 323 #define PSW_MASK_CC 0x0000300000000000ULL 324 #define PSW_MASK_PM 0x00000F0000000000ULL 325 #define PSW_MASK_64 0x0000000100000000ULL 326 #define PSW_MASK_32 0x0000000080000000ULL 327 #define PSW_MASK_ESA_ADDR 0x000000007fffffffULL 328 329 #undef PSW_ASC_PRIMARY 330 #undef PSW_ASC_ACCREG 331 #undef PSW_ASC_SECONDARY 332 #undef PSW_ASC_HOME 333 334 #define PSW_ASC_PRIMARY 0x0000000000000000ULL 335 #define PSW_ASC_ACCREG 0x0000400000000000ULL 336 #define PSW_ASC_SECONDARY 0x0000800000000000ULL 337 #define PSW_ASC_HOME 0x0000C00000000000ULL 338 339 /* tb flags */ 340 341 #define FLAG_MASK_PER (PSW_MASK_PER >> 32) 342 #define FLAG_MASK_DAT (PSW_MASK_DAT >> 32) 343 #define FLAG_MASK_IO (PSW_MASK_IO >> 32) 344 #define FLAG_MASK_EXT (PSW_MASK_EXT >> 32) 345 #define FLAG_MASK_KEY (PSW_MASK_KEY >> 32) 346 #define FLAG_MASK_MCHECK (PSW_MASK_MCHECK >> 32) 347 #define FLAG_MASK_WAIT (PSW_MASK_WAIT >> 32) 348 #define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> 32) 349 #define FLAG_MASK_ASC (PSW_MASK_ASC >> 32) 350 #define FLAG_MASK_CC (PSW_MASK_CC >> 32) 351 #define FLAG_MASK_PM (PSW_MASK_PM >> 32) 352 #define FLAG_MASK_64 (PSW_MASK_64 >> 32) 353 #define FLAG_MASK_32 0x00001000 354 355 /* Control register 0 bits */ 356 #define CR0_LOWPROT 0x0000000010000000ULL 357 #define CR0_EDAT 0x0000000000800000ULL 358 359 /* MMU */ 360 #define MMU_PRIMARY_IDX 0 361 #define MMU_SECONDARY_IDX 1 362 #define MMU_HOME_IDX 2 363 364 static inline int cpu_mmu_index (CPUS390XState *env, bool ifetch) 365 { 366 switch (env->psw.mask & PSW_MASK_ASC) { 367 case PSW_ASC_PRIMARY: 368 return MMU_PRIMARY_IDX; 369 case PSW_ASC_SECONDARY: 370 return MMU_SECONDARY_IDX; 371 case PSW_ASC_HOME: 372 return MMU_HOME_IDX; 373 case PSW_ASC_ACCREG: 374 /* Fallthrough: access register mode is not yet supported */ 375 default: 376 abort(); 377 } 378 } 379 380 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx) 381 { 382 switch (mmu_idx) { 383 case MMU_PRIMARY_IDX: 384 return PSW_ASC_PRIMARY; 385 case MMU_SECONDARY_IDX: 386 return PSW_ASC_SECONDARY; 387 case MMU_HOME_IDX: 388 return PSW_ASC_HOME; 389 default: 390 abort(); 391 } 392 } 393 394 static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc, 395 target_ulong *cs_base, uint32_t *flags) 396 { 397 *pc = env->psw.addr; 398 *cs_base = env->ex_value; 399 *flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) | 400 ((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0); 401 } 402 403 #define MAX_ILEN 6 404 405 /* While the PoO talks about ILC (a number between 1-3) what is actually 406 stored in LowCore is shifted left one bit (an even between 2-6). As 407 this is the actual length of the insn and therefore more useful, that 408 is what we want to pass around and manipulate. To make sure that we 409 have applied this distinction universally, rename the "ILC" to "ILEN". */ 410 static inline int get_ilen(uint8_t opc) 411 { 412 switch (opc >> 6) { 413 case 0: 414 return 2; 415 case 1: 416 case 2: 417 return 4; 418 default: 419 return 6; 420 } 421 } 422 423 /* PER bits from control register 9 */ 424 #define PER_CR9_EVENT_BRANCH 0x80000000 425 #define PER_CR9_EVENT_IFETCH 0x40000000 426 #define PER_CR9_EVENT_STORE 0x20000000 427 #define PER_CR9_EVENT_STORE_REAL 0x08000000 428 #define PER_CR9_EVENT_NULLIFICATION 0x01000000 429 #define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000 430 #define PER_CR9_CONTROL_ALTERATION 0x00200000 431 432 /* PER bits from the PER CODE/ATMID/AI in lowcore */ 433 #define PER_CODE_EVENT_BRANCH 0x8000 434 #define PER_CODE_EVENT_IFETCH 0x4000 435 #define PER_CODE_EVENT_STORE 0x2000 436 #define PER_CODE_EVENT_STORE_REAL 0x0800 437 #define PER_CODE_EVENT_NULLIFICATION 0x0100 438 439 /* Compute the ATMID field that is stored in the per_perc_atmid lowcore 440 entry when a PER exception is triggered. */ 441 static inline uint8_t get_per_atmid(CPUS390XState *env) 442 { 443 return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) | 444 ( (1 << 6) ) | 445 ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) | 446 ((env->psw.mask & PSW_MASK_DAT)? (1 << 4) : 0) | 447 ((env->psw.mask & PSW_ASC_SECONDARY)? (1 << 3) : 0) | 448 ((env->psw.mask & PSW_ASC_ACCREG)? (1 << 2) : 0); 449 } 450 451 /* Check if an address is within the PER starting address and the PER 452 ending address. The address range might loop. */ 453 static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr) 454 { 455 if (env->cregs[10] <= env->cregs[11]) { 456 return env->cregs[10] <= addr && addr <= env->cregs[11]; 457 } else { 458 return env->cregs[10] <= addr || addr <= env->cregs[11]; 459 } 460 } 461 462 #ifndef CONFIG_USER_ONLY 463 /* In several cases of runtime exceptions, we havn't recorded the true 464 instruction length. Use these codes when raising exceptions in order 465 to re-compute the length by examining the insn in memory. */ 466 #define ILEN_LATER 0x20 467 #define ILEN_LATER_INC 0x21 468 void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen); 469 #endif 470 471 S390CPU *cpu_s390x_init(const char *cpu_model); 472 S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp); 473 S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp); 474 void s390x_translate_init(void); 475 476 /* you can call this signal handler from your SIGBUS and SIGSEGV 477 signal handlers to inform the virtual CPU of exceptions. non zero 478 is returned if the signal was handled by the virtual CPU. */ 479 int cpu_s390x_signal_handler(int host_signum, void *pinfo, 480 void *puc); 481 int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 482 int mmu_idx); 483 484 485 #ifndef CONFIG_USER_ONLY 486 void do_restart_interrupt(CPUS390XState *env); 487 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 488 MMUAccessType access_type, 489 int mmu_idx, uintptr_t retaddr); 490 491 static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb, 492 uint8_t *ar) 493 { 494 hwaddr addr = 0; 495 uint8_t reg; 496 497 reg = ipb >> 28; 498 if (reg > 0) { 499 addr = env->regs[reg]; 500 } 501 addr += (ipb >> 16) & 0xfff; 502 if (ar) { 503 *ar = reg; 504 } 505 506 return addr; 507 } 508 509 /* Base/displacement are at the same locations. */ 510 #define decode_basedisp_rs decode_basedisp_s 511 512 /* helper functions for run_on_cpu() */ 513 static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg) 514 { 515 S390CPUClass *scc = S390_CPU_GET_CLASS(cs); 516 517 scc->cpu_reset(cs); 518 } 519 static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg) 520 { 521 cpu_reset(cs); 522 } 523 524 void s390x_tod_timer(void *opaque); 525 void s390x_cpu_timer(void *opaque); 526 527 int s390_virtio_hypercall(CPUS390XState *env); 528 529 #ifdef CONFIG_KVM 530 void kvm_s390_service_interrupt(uint32_t parm); 531 void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq); 532 void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq); 533 int kvm_s390_inject_flic(struct kvm_s390_irq *irq); 534 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code); 535 int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, 536 int len, bool is_write); 537 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock); 538 int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock); 539 #else 540 static inline void kvm_s390_service_interrupt(uint32_t parm) 541 { 542 } 543 static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low) 544 { 545 return -ENOSYS; 546 } 547 static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low) 548 { 549 return -ENOSYS; 550 } 551 static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, 552 void *hostbuf, int len, bool is_write) 553 { 554 return -ENOSYS; 555 } 556 static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, 557 uint64_t te_code) 558 { 559 } 560 #endif 561 562 static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low) 563 { 564 if (kvm_enabled()) { 565 return kvm_s390_get_clock(tod_high, tod_low); 566 } 567 /* Fixme TCG */ 568 *tod_high = 0; 569 *tod_low = 0; 570 return 0; 571 } 572 573 static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low) 574 { 575 if (kvm_enabled()) { 576 return kvm_s390_set_clock(tod_high, tod_low); 577 } 578 /* Fixme TCG */ 579 return 0; 580 } 581 582 S390CPU *s390_cpu_addr2state(uint16_t cpu_addr); 583 unsigned int s390_cpu_halt(S390CPU *cpu); 584 void s390_cpu_unhalt(S390CPU *cpu); 585 unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu); 586 static inline uint8_t s390_cpu_get_state(S390CPU *cpu) 587 { 588 return cpu->env.cpu_state; 589 } 590 591 void gtod_save(QEMUFile *f, void *opaque); 592 int gtod_load(QEMUFile *f, void *opaque, int version_id); 593 594 void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param, 595 uint64_t param64); 596 597 /* ioinst.c */ 598 void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1); 599 void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1); 600 void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1); 601 void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb); 602 void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb); 603 void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb); 604 void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb); 605 int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb); 606 void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb); 607 int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb); 608 void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2, 609 uint32_t ipb); 610 void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1); 611 void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1); 612 void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1); 613 614 /* service interrupts are floating therefore we must not pass an cpustate */ 615 void s390_sclp_extint(uint32_t parm); 616 617 #else 618 static inline unsigned int s390_cpu_halt(S390CPU *cpu) 619 { 620 return 0; 621 } 622 623 static inline void s390_cpu_unhalt(S390CPU *cpu) 624 { 625 } 626 627 static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) 628 { 629 return 0; 630 } 631 #endif 632 633 extern void subsystem_reset(void); 634 635 #define cpu_init(model) CPU(cpu_s390x_init(model)) 636 #define cpu_signal_handler cpu_s390x_signal_handler 637 638 void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf); 639 #define cpu_list s390_cpu_list 640 void s390_cpu_model_register_props(Object *obj); 641 void s390_cpu_model_class_register_props(ObjectClass *oc); 642 void s390_realize_cpu_model(CPUState *cs, Error **errp); 643 ObjectClass *s390_cpu_class_by_name(const char *name); 644 645 #define EXCP_EXT 1 /* external interrupt */ 646 #define EXCP_SVC 2 /* supervisor call (syscall) */ 647 #define EXCP_PGM 3 /* program interruption */ 648 #define EXCP_IO 7 /* I/O interrupt */ 649 #define EXCP_MCHK 8 /* machine check */ 650 651 #define INTERRUPT_EXT (1 << 0) 652 #define INTERRUPT_TOD (1 << 1) 653 #define INTERRUPT_CPUTIMER (1 << 2) 654 #define INTERRUPT_IO (1 << 3) 655 #define INTERRUPT_MCHK (1 << 4) 656 657 /* Program Status Word. */ 658 #define S390_PSWM_REGNUM 0 659 #define S390_PSWA_REGNUM 1 660 /* General Purpose Registers. */ 661 #define S390_R0_REGNUM 2 662 #define S390_R1_REGNUM 3 663 #define S390_R2_REGNUM 4 664 #define S390_R3_REGNUM 5 665 #define S390_R4_REGNUM 6 666 #define S390_R5_REGNUM 7 667 #define S390_R6_REGNUM 8 668 #define S390_R7_REGNUM 9 669 #define S390_R8_REGNUM 10 670 #define S390_R9_REGNUM 11 671 #define S390_R10_REGNUM 12 672 #define S390_R11_REGNUM 13 673 #define S390_R12_REGNUM 14 674 #define S390_R13_REGNUM 15 675 #define S390_R14_REGNUM 16 676 #define S390_R15_REGNUM 17 677 /* Total Core Registers. */ 678 #define S390_NUM_CORE_REGS 18 679 680 /* CC optimization */ 681 682 /* Instead of computing the condition codes after each x86 instruction, 683 * QEMU just stores the result (called CC_DST), the type of operation 684 * (called CC_OP) and whatever operands are needed (CC_SRC and possibly 685 * CC_VR). When the condition codes are needed, the condition codes can 686 * be calculated using this information. Condition codes are not generated 687 * if they are only needed for conditional branches. 688 */ 689 enum cc_op { 690 CC_OP_CONST0 = 0, /* CC is 0 */ 691 CC_OP_CONST1, /* CC is 1 */ 692 CC_OP_CONST2, /* CC is 2 */ 693 CC_OP_CONST3, /* CC is 3 */ 694 695 CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */ 696 CC_OP_STATIC, /* CC value is env->cc_op */ 697 698 CC_OP_NZ, /* env->cc_dst != 0 */ 699 CC_OP_LTGT_32, /* signed less/greater than (32bit) */ 700 CC_OP_LTGT_64, /* signed less/greater than (64bit) */ 701 CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */ 702 CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */ 703 CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */ 704 CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */ 705 706 CC_OP_ADD_64, /* overflow on add (64bit) */ 707 CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */ 708 CC_OP_ADDC_64, /* overflow on unsigned add-carry (64bit) */ 709 CC_OP_SUB_64, /* overflow on subtraction (64bit) */ 710 CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */ 711 CC_OP_SUBB_64, /* overflow on unsigned sub-borrow (64bit) */ 712 CC_OP_ABS_64, /* sign eval on abs (64bit) */ 713 CC_OP_NABS_64, /* sign eval on nabs (64bit) */ 714 715 CC_OP_ADD_32, /* overflow on add (32bit) */ 716 CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */ 717 CC_OP_ADDC_32, /* overflow on unsigned add-carry (32bit) */ 718 CC_OP_SUB_32, /* overflow on subtraction (32bit) */ 719 CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */ 720 CC_OP_SUBB_32, /* overflow on unsigned sub-borrow (32bit) */ 721 CC_OP_ABS_32, /* sign eval on abs (64bit) */ 722 CC_OP_NABS_32, /* sign eval on nabs (64bit) */ 723 724 CC_OP_COMP_32, /* complement */ 725 CC_OP_COMP_64, /* complement */ 726 727 CC_OP_TM_32, /* test under mask (32bit) */ 728 CC_OP_TM_64, /* test under mask (64bit) */ 729 730 CC_OP_NZ_F32, /* FP dst != 0 (32bit) */ 731 CC_OP_NZ_F64, /* FP dst != 0 (64bit) */ 732 CC_OP_NZ_F128, /* FP dst != 0 (128bit) */ 733 734 CC_OP_ICM, /* insert characters under mask */ 735 CC_OP_SLA_32, /* Calculate shift left signed (32bit) */ 736 CC_OP_SLA_64, /* Calculate shift left signed (64bit) */ 737 CC_OP_FLOGR, /* find leftmost one */ 738 CC_OP_MAX 739 }; 740 741 static const char *cc_names[] = { 742 [CC_OP_CONST0] = "CC_OP_CONST0", 743 [CC_OP_CONST1] = "CC_OP_CONST1", 744 [CC_OP_CONST2] = "CC_OP_CONST2", 745 [CC_OP_CONST3] = "CC_OP_CONST3", 746 [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC", 747 [CC_OP_STATIC] = "CC_OP_STATIC", 748 [CC_OP_NZ] = "CC_OP_NZ", 749 [CC_OP_LTGT_32] = "CC_OP_LTGT_32", 750 [CC_OP_LTGT_64] = "CC_OP_LTGT_64", 751 [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32", 752 [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64", 753 [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32", 754 [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64", 755 [CC_OP_ADD_64] = "CC_OP_ADD_64", 756 [CC_OP_ADDU_64] = "CC_OP_ADDU_64", 757 [CC_OP_ADDC_64] = "CC_OP_ADDC_64", 758 [CC_OP_SUB_64] = "CC_OP_SUB_64", 759 [CC_OP_SUBU_64] = "CC_OP_SUBU_64", 760 [CC_OP_SUBB_64] = "CC_OP_SUBB_64", 761 [CC_OP_ABS_64] = "CC_OP_ABS_64", 762 [CC_OP_NABS_64] = "CC_OP_NABS_64", 763 [CC_OP_ADD_32] = "CC_OP_ADD_32", 764 [CC_OP_ADDU_32] = "CC_OP_ADDU_32", 765 [CC_OP_ADDC_32] = "CC_OP_ADDC_32", 766 [CC_OP_SUB_32] = "CC_OP_SUB_32", 767 [CC_OP_SUBU_32] = "CC_OP_SUBU_32", 768 [CC_OP_SUBB_32] = "CC_OP_SUBB_32", 769 [CC_OP_ABS_32] = "CC_OP_ABS_32", 770 [CC_OP_NABS_32] = "CC_OP_NABS_32", 771 [CC_OP_COMP_32] = "CC_OP_COMP_32", 772 [CC_OP_COMP_64] = "CC_OP_COMP_64", 773 [CC_OP_TM_32] = "CC_OP_TM_32", 774 [CC_OP_TM_64] = "CC_OP_TM_64", 775 [CC_OP_NZ_F32] = "CC_OP_NZ_F32", 776 [CC_OP_NZ_F64] = "CC_OP_NZ_F64", 777 [CC_OP_NZ_F128] = "CC_OP_NZ_F128", 778 [CC_OP_ICM] = "CC_OP_ICM", 779 [CC_OP_SLA_32] = "CC_OP_SLA_32", 780 [CC_OP_SLA_64] = "CC_OP_SLA_64", 781 [CC_OP_FLOGR] = "CC_OP_FLOGR", 782 }; 783 784 static inline const char *cc_name(int cc_op) 785 { 786 return cc_names[cc_op]; 787 } 788 789 static inline void setcc(S390CPU *cpu, uint64_t cc) 790 { 791 CPUS390XState *env = &cpu->env; 792 793 env->psw.mask &= ~(3ull << 44); 794 env->psw.mask |= (cc & 3) << 44; 795 env->cc_op = cc; 796 } 797 798 typedef struct LowCore 799 { 800 /* prefix area: defined by architecture */ 801 uint32_t ccw1[2]; /* 0x000 */ 802 uint32_t ccw2[4]; /* 0x008 */ 803 uint8_t pad1[0x80-0x18]; /* 0x018 */ 804 uint32_t ext_params; /* 0x080 */ 805 uint16_t cpu_addr; /* 0x084 */ 806 uint16_t ext_int_code; /* 0x086 */ 807 uint16_t svc_ilen; /* 0x088 */ 808 uint16_t svc_code; /* 0x08a */ 809 uint16_t pgm_ilen; /* 0x08c */ 810 uint16_t pgm_code; /* 0x08e */ 811 uint32_t data_exc_code; /* 0x090 */ 812 uint16_t mon_class_num; /* 0x094 */ 813 uint16_t per_perc_atmid; /* 0x096 */ 814 uint64_t per_address; /* 0x098 */ 815 uint8_t exc_access_id; /* 0x0a0 */ 816 uint8_t per_access_id; /* 0x0a1 */ 817 uint8_t op_access_id; /* 0x0a2 */ 818 uint8_t ar_access_id; /* 0x0a3 */ 819 uint8_t pad2[0xA8-0xA4]; /* 0x0a4 */ 820 uint64_t trans_exc_code; /* 0x0a8 */ 821 uint64_t monitor_code; /* 0x0b0 */ 822 uint16_t subchannel_id; /* 0x0b8 */ 823 uint16_t subchannel_nr; /* 0x0ba */ 824 uint32_t io_int_parm; /* 0x0bc */ 825 uint32_t io_int_word; /* 0x0c0 */ 826 uint8_t pad3[0xc8-0xc4]; /* 0x0c4 */ 827 uint32_t stfl_fac_list; /* 0x0c8 */ 828 uint8_t pad4[0xe8-0xcc]; /* 0x0cc */ 829 uint32_t mcck_interruption_code[2]; /* 0x0e8 */ 830 uint8_t pad5[0xf4-0xf0]; /* 0x0f0 */ 831 uint32_t external_damage_code; /* 0x0f4 */ 832 uint64_t failing_storage_address; /* 0x0f8 */ 833 uint8_t pad6[0x110-0x100]; /* 0x100 */ 834 uint64_t per_breaking_event_addr; /* 0x110 */ 835 uint8_t pad7[0x120-0x118]; /* 0x118 */ 836 PSW restart_old_psw; /* 0x120 */ 837 PSW external_old_psw; /* 0x130 */ 838 PSW svc_old_psw; /* 0x140 */ 839 PSW program_old_psw; /* 0x150 */ 840 PSW mcck_old_psw; /* 0x160 */ 841 PSW io_old_psw; /* 0x170 */ 842 uint8_t pad8[0x1a0-0x180]; /* 0x180 */ 843 PSW restart_new_psw; /* 0x1a0 */ 844 PSW external_new_psw; /* 0x1b0 */ 845 PSW svc_new_psw; /* 0x1c0 */ 846 PSW program_new_psw; /* 0x1d0 */ 847 PSW mcck_new_psw; /* 0x1e0 */ 848 PSW io_new_psw; /* 0x1f0 */ 849 PSW return_psw; /* 0x200 */ 850 uint8_t irb[64]; /* 0x210 */ 851 uint64_t sync_enter_timer; /* 0x250 */ 852 uint64_t async_enter_timer; /* 0x258 */ 853 uint64_t exit_timer; /* 0x260 */ 854 uint64_t last_update_timer; /* 0x268 */ 855 uint64_t user_timer; /* 0x270 */ 856 uint64_t system_timer; /* 0x278 */ 857 uint64_t last_update_clock; /* 0x280 */ 858 uint64_t steal_clock; /* 0x288 */ 859 PSW return_mcck_psw; /* 0x290 */ 860 uint8_t pad9[0xc00-0x2a0]; /* 0x2a0 */ 861 /* System info area */ 862 uint64_t save_area[16]; /* 0xc00 */ 863 uint8_t pad10[0xd40-0xc80]; /* 0xc80 */ 864 uint64_t kernel_stack; /* 0xd40 */ 865 uint64_t thread_info; /* 0xd48 */ 866 uint64_t async_stack; /* 0xd50 */ 867 uint64_t kernel_asce; /* 0xd58 */ 868 uint64_t user_asce; /* 0xd60 */ 869 uint64_t panic_stack; /* 0xd68 */ 870 uint64_t user_exec_asce; /* 0xd70 */ 871 uint8_t pad11[0xdc0-0xd78]; /* 0xd78 */ 872 873 /* SMP info area: defined by DJB */ 874 uint64_t clock_comparator; /* 0xdc0 */ 875 uint64_t ext_call_fast; /* 0xdc8 */ 876 uint64_t percpu_offset; /* 0xdd0 */ 877 uint64_t current_task; /* 0xdd8 */ 878 uint32_t softirq_pending; /* 0xde0 */ 879 uint32_t pad_0x0de4; /* 0xde4 */ 880 uint64_t int_clock; /* 0xde8 */ 881 uint8_t pad12[0xe00-0xdf0]; /* 0xdf0 */ 882 883 /* 0xe00 is used as indicator for dump tools */ 884 /* whether the kernel died with panic() or not */ 885 uint32_t panic_magic; /* 0xe00 */ 886 887 uint8_t pad13[0x11b8-0xe04]; /* 0xe04 */ 888 889 /* 64 bit extparam used for pfault, diag 250 etc */ 890 uint64_t ext_params2; /* 0x11B8 */ 891 892 uint8_t pad14[0x1200-0x11C0]; /* 0x11C0 */ 893 894 /* System info area */ 895 896 uint64_t floating_pt_save_area[16]; /* 0x1200 */ 897 uint64_t gpregs_save_area[16]; /* 0x1280 */ 898 uint32_t st_status_fixed_logout[4]; /* 0x1300 */ 899 uint8_t pad15[0x1318-0x1310]; /* 0x1310 */ 900 uint32_t prefixreg_save_area; /* 0x1318 */ 901 uint32_t fpt_creg_save_area; /* 0x131c */ 902 uint8_t pad16[0x1324-0x1320]; /* 0x1320 */ 903 uint32_t tod_progreg_save_area; /* 0x1324 */ 904 uint32_t cpu_timer_save_area[2]; /* 0x1328 */ 905 uint32_t clock_comp_save_area[2]; /* 0x1330 */ 906 uint8_t pad17[0x1340-0x1338]; /* 0x1338 */ 907 uint32_t access_regs_save_area[16]; /* 0x1340 */ 908 uint64_t cregs_save_area[16]; /* 0x1380 */ 909 910 /* align to the top of the prefix area */ 911 912 uint8_t pad18[0x2000-0x1400]; /* 0x1400 */ 913 } QEMU_PACKED LowCore; 914 915 /* STSI */ 916 #define STSI_LEVEL_MASK 0x00000000f0000000ULL 917 #define STSI_LEVEL_CURRENT 0x0000000000000000ULL 918 #define STSI_LEVEL_1 0x0000000010000000ULL 919 #define STSI_LEVEL_2 0x0000000020000000ULL 920 #define STSI_LEVEL_3 0x0000000030000000ULL 921 #define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL 922 #define STSI_R0_SEL1_MASK 0x00000000000000ffULL 923 #define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL 924 #define STSI_R1_SEL2_MASK 0x000000000000ffffULL 925 926 /* Basic Machine Configuration */ 927 struct sysib_111 { 928 uint32_t res1[8]; 929 uint8_t manuf[16]; 930 uint8_t type[4]; 931 uint8_t res2[12]; 932 uint8_t model[16]; 933 uint8_t sequence[16]; 934 uint8_t plant[4]; 935 uint8_t res3[156]; 936 }; 937 938 /* Basic Machine CPU */ 939 struct sysib_121 { 940 uint32_t res1[80]; 941 uint8_t sequence[16]; 942 uint8_t plant[4]; 943 uint8_t res2[2]; 944 uint16_t cpu_addr; 945 uint8_t res3[152]; 946 }; 947 948 /* Basic Machine CPUs */ 949 struct sysib_122 { 950 uint8_t res1[32]; 951 uint32_t capability; 952 uint16_t total_cpus; 953 uint16_t active_cpus; 954 uint16_t standby_cpus; 955 uint16_t reserved_cpus; 956 uint16_t adjustments[2026]; 957 }; 958 959 /* LPAR CPU */ 960 struct sysib_221 { 961 uint32_t res1[80]; 962 uint8_t sequence[16]; 963 uint8_t plant[4]; 964 uint16_t cpu_id; 965 uint16_t cpu_addr; 966 uint8_t res3[152]; 967 }; 968 969 /* LPAR CPUs */ 970 struct sysib_222 { 971 uint32_t res1[32]; 972 uint16_t lpar_num; 973 uint8_t res2; 974 uint8_t lcpuc; 975 uint16_t total_cpus; 976 uint16_t conf_cpus; 977 uint16_t standby_cpus; 978 uint16_t reserved_cpus; 979 uint8_t name[8]; 980 uint32_t caf; 981 uint8_t res3[16]; 982 uint16_t dedicated_cpus; 983 uint16_t shared_cpus; 984 uint8_t res4[180]; 985 }; 986 987 /* VM CPUs */ 988 struct sysib_322 { 989 uint8_t res1[31]; 990 uint8_t count; 991 struct { 992 uint8_t res2[4]; 993 uint16_t total_cpus; 994 uint16_t conf_cpus; 995 uint16_t standby_cpus; 996 uint16_t reserved_cpus; 997 uint8_t name[8]; 998 uint32_t caf; 999 uint8_t cpi[16]; 1000 uint8_t res5[3]; 1001 uint8_t ext_name_encoding; 1002 uint32_t res3; 1003 uint8_t uuid[16]; 1004 } vm[8]; 1005 uint8_t res4[1504]; 1006 uint8_t ext_names[8][256]; 1007 }; 1008 1009 /* MMU defines */ 1010 #define _ASCE_ORIGIN ~0xfffULL /* segment table origin */ 1011 #define _ASCE_SUBSPACE 0x200 /* subspace group control */ 1012 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ 1013 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ 1014 #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ 1015 #define _ASCE_REAL_SPACE 0x20 /* real space control */ 1016 #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ 1017 #define _ASCE_TYPE_REGION1 0x0c /* region first table type */ 1018 #define _ASCE_TYPE_REGION2 0x08 /* region second table type */ 1019 #define _ASCE_TYPE_REGION3 0x04 /* region third table type */ 1020 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ 1021 #define _ASCE_TABLE_LENGTH 0x03 /* region table length */ 1022 1023 #define _REGION_ENTRY_ORIGIN ~0xfffULL /* region/segment table origin */ 1024 #define _REGION_ENTRY_RO 0x200 /* region/segment protection bit */ 1025 #define _REGION_ENTRY_TF 0xc0 /* region/segment table offset */ 1026 #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ 1027 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ 1028 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ 1029 #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ 1030 #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ 1031 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ 1032 1033 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffULL /* segment table origin */ 1034 #define _SEGMENT_ENTRY_FC 0x400 /* format control */ 1035 #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ 1036 #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ 1037 1038 #define VADDR_PX 0xff000 /* page index bits */ 1039 1040 #define _PAGE_RO 0x200 /* HW read-only bit */ 1041 #define _PAGE_INVALID 0x400 /* HW invalid bit */ 1042 #define _PAGE_RES0 0x800 /* bit must be zero */ 1043 1044 #define SK_C (0x1 << 1) 1045 #define SK_R (0x1 << 2) 1046 #define SK_F (0x1 << 3) 1047 #define SK_ACC_MASK (0xf << 4) 1048 1049 /* SIGP order codes */ 1050 #define SIGP_SENSE 0x01 1051 #define SIGP_EXTERNAL_CALL 0x02 1052 #define SIGP_EMERGENCY 0x03 1053 #define SIGP_START 0x04 1054 #define SIGP_STOP 0x05 1055 #define SIGP_RESTART 0x06 1056 #define SIGP_STOP_STORE_STATUS 0x09 1057 #define SIGP_INITIAL_CPU_RESET 0x0b 1058 #define SIGP_CPU_RESET 0x0c 1059 #define SIGP_SET_PREFIX 0x0d 1060 #define SIGP_STORE_STATUS_ADDR 0x0e 1061 #define SIGP_SET_ARCH 0x12 1062 #define SIGP_STORE_ADTL_STATUS 0x17 1063 1064 /* SIGP condition codes */ 1065 #define SIGP_CC_ORDER_CODE_ACCEPTED 0 1066 #define SIGP_CC_STATUS_STORED 1 1067 #define SIGP_CC_BUSY 2 1068 #define SIGP_CC_NOT_OPERATIONAL 3 1069 1070 /* SIGP status bits */ 1071 #define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL 1072 #define SIGP_STAT_INCORRECT_STATE 0x00000200UL 1073 #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL 1074 #define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL 1075 #define SIGP_STAT_STOPPED 0x00000040UL 1076 #define SIGP_STAT_OPERATOR_INTERV 0x00000020UL 1077 #define SIGP_STAT_CHECK_STOP 0x00000010UL 1078 #define SIGP_STAT_INOPERATIVE 0x00000004UL 1079 #define SIGP_STAT_INVALID_ORDER 0x00000002UL 1080 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL 1081 1082 /* SIGP SET ARCHITECTURE modes */ 1083 #define SIGP_MODE_ESA_S390 0 1084 #define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1 1085 #define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2 1086 1087 /* SIGP order code mask corresponding to bit positions 56-63 */ 1088 #define SIGP_ORDER_MASK 0x000000ff 1089 1090 void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr); 1091 target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr); 1092 int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, 1093 target_ulong *raddr, int *flags, bool exc); 1094 int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code); 1095 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, 1096 uint64_t vr); 1097 void s390_cpu_recompute_watchpoints(CPUState *cs); 1098 1099 int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, 1100 int len, bool is_write); 1101 1102 #define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \ 1103 s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false) 1104 #define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \ 1105 s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true) 1106 #define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \ 1107 s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true) 1108 1109 /* The value of the TOD clock for 1.1.1970. */ 1110 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL 1111 1112 /* Converts ns to s390's clock format */ 1113 static inline uint64_t time2tod(uint64_t ns) { 1114 return (ns << 9) / 125; 1115 } 1116 1117 /* Converts s390's clock format to ns */ 1118 static inline uint64_t tod2time(uint64_t t) { 1119 return (t * 125) >> 9; 1120 } 1121 1122 /* from s390-virtio-ccw */ 1123 #define MEM_SECTION_SIZE 0x10000000UL 1124 #define MAX_AVAIL_SLOTS 32 1125 1126 /* fpu_helper.c */ 1127 uint32_t set_cc_nz_f32(float32 v); 1128 uint32_t set_cc_nz_f64(float64 v); 1129 uint32_t set_cc_nz_f128(float128 v); 1130 1131 /* misc_helper.c */ 1132 #ifndef CONFIG_USER_ONLY 1133 int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3); 1134 void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3); 1135 #endif 1136 void program_interrupt(CPUS390XState *env, uint32_t code, int ilen); 1137 void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp, 1138 uintptr_t retaddr); 1139 1140 #ifdef CONFIG_KVM 1141 void kvm_s390_io_interrupt(uint16_t subchannel_id, 1142 uint16_t subchannel_nr, uint32_t io_int_parm, 1143 uint32_t io_int_word); 1144 void kvm_s390_crw_mchk(void); 1145 void kvm_s390_enable_css_support(S390CPU *cpu); 1146 int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, 1147 int vq, bool assign); 1148 int kvm_s390_cpu_restart(S390CPU *cpu); 1149 int kvm_s390_get_memslot_count(KVMState *s); 1150 void kvm_s390_cmma_reset(void); 1151 int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state); 1152 void kvm_s390_reset_vcpu(S390CPU *cpu); 1153 int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit); 1154 void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu); 1155 int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu); 1156 int kvm_s390_get_ri(void); 1157 void kvm_s390_crypto_reset(void); 1158 #else 1159 static inline void kvm_s390_io_interrupt(uint16_t subchannel_id, 1160 uint16_t subchannel_nr, 1161 uint32_t io_int_parm, 1162 uint32_t io_int_word) 1163 { 1164 } 1165 static inline void kvm_s390_crw_mchk(void) 1166 { 1167 } 1168 static inline void kvm_s390_enable_css_support(S390CPU *cpu) 1169 { 1170 } 1171 static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, 1172 uint32_t sch, int vq, 1173 bool assign) 1174 { 1175 return -ENOSYS; 1176 } 1177 static inline int kvm_s390_cpu_restart(S390CPU *cpu) 1178 { 1179 return -ENOSYS; 1180 } 1181 static inline void kvm_s390_cmma_reset(void) 1182 { 1183 } 1184 static inline int kvm_s390_get_memslot_count(KVMState *s) 1185 { 1186 return MAX_AVAIL_SLOTS; 1187 } 1188 static inline int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state) 1189 { 1190 return -ENOSYS; 1191 } 1192 static inline void kvm_s390_reset_vcpu(S390CPU *cpu) 1193 { 1194 } 1195 static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, 1196 uint64_t *hw_limit) 1197 { 1198 return 0; 1199 } 1200 static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu) 1201 { 1202 } 1203 static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) 1204 { 1205 return 0; 1206 } 1207 static inline int kvm_s390_get_ri(void) 1208 { 1209 return 0; 1210 } 1211 static inline void kvm_s390_crypto_reset(void) 1212 { 1213 } 1214 #endif 1215 1216 static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit) 1217 { 1218 if (kvm_enabled()) { 1219 return kvm_s390_set_mem_limit(kvm_state, new_limit, hw_limit); 1220 } 1221 return 0; 1222 } 1223 1224 static inline void s390_cmma_reset(void) 1225 { 1226 if (kvm_enabled()) { 1227 kvm_s390_cmma_reset(); 1228 } 1229 } 1230 1231 static inline int s390_cpu_restart(S390CPU *cpu) 1232 { 1233 if (kvm_enabled()) { 1234 return kvm_s390_cpu_restart(cpu); 1235 } 1236 return -ENOSYS; 1237 } 1238 1239 static inline int s390_get_memslot_count(KVMState *s) 1240 { 1241 if (kvm_enabled()) { 1242 return kvm_s390_get_memslot_count(s); 1243 } else { 1244 return MAX_AVAIL_SLOTS; 1245 } 1246 } 1247 1248 void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr, 1249 uint32_t io_int_parm, uint32_t io_int_word); 1250 void s390_crw_mchk(void); 1251 1252 static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier, 1253 uint32_t sch_id, int vq, 1254 bool assign) 1255 { 1256 return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign); 1257 } 1258 1259 static inline void s390_crypto_reset(void) 1260 { 1261 if (kvm_enabled()) { 1262 kvm_s390_crypto_reset(); 1263 } 1264 } 1265 1266 static inline bool s390_get_squash_mcss(void) 1267 { 1268 if (object_property_get_bool(OBJECT(qdev_get_machine()), "s390-squash-mcss", 1269 NULL)) { 1270 return true; 1271 } 1272 1273 return false; 1274 } 1275 1276 /* machine check interruption code */ 1277 1278 /* subclasses */ 1279 #define MCIC_SC_SD 0x8000000000000000ULL 1280 #define MCIC_SC_PD 0x4000000000000000ULL 1281 #define MCIC_SC_SR 0x2000000000000000ULL 1282 #define MCIC_SC_CD 0x0800000000000000ULL 1283 #define MCIC_SC_ED 0x0400000000000000ULL 1284 #define MCIC_SC_DG 0x0100000000000000ULL 1285 #define MCIC_SC_W 0x0080000000000000ULL 1286 #define MCIC_SC_CP 0x0040000000000000ULL 1287 #define MCIC_SC_SP 0x0020000000000000ULL 1288 #define MCIC_SC_CK 0x0010000000000000ULL 1289 1290 /* subclass modifiers */ 1291 #define MCIC_SCM_B 0x0002000000000000ULL 1292 #define MCIC_SCM_DA 0x0000000020000000ULL 1293 #define MCIC_SCM_AP 0x0000000000080000ULL 1294 1295 /* storage errors */ 1296 #define MCIC_SE_SE 0x0000800000000000ULL 1297 #define MCIC_SE_SC 0x0000400000000000ULL 1298 #define MCIC_SE_KE 0x0000200000000000ULL 1299 #define MCIC_SE_DS 0x0000100000000000ULL 1300 #define MCIC_SE_IE 0x0000000080000000ULL 1301 1302 /* validity bits */ 1303 #define MCIC_VB_WP 0x0000080000000000ULL 1304 #define MCIC_VB_MS 0x0000040000000000ULL 1305 #define MCIC_VB_PM 0x0000020000000000ULL 1306 #define MCIC_VB_IA 0x0000010000000000ULL 1307 #define MCIC_VB_FA 0x0000008000000000ULL 1308 #define MCIC_VB_VR 0x0000004000000000ULL 1309 #define MCIC_VB_EC 0x0000002000000000ULL 1310 #define MCIC_VB_FP 0x0000001000000000ULL 1311 #define MCIC_VB_GR 0x0000000800000000ULL 1312 #define MCIC_VB_CR 0x0000000400000000ULL 1313 #define MCIC_VB_ST 0x0000000100000000ULL 1314 #define MCIC_VB_AR 0x0000000040000000ULL 1315 #define MCIC_VB_PR 0x0000000000200000ULL 1316 #define MCIC_VB_FC 0x0000000000100000ULL 1317 #define MCIC_VB_CT 0x0000000000020000ULL 1318 #define MCIC_VB_CC 0x0000000000010000ULL 1319 1320 #endif 1321