/*
 * S/390 helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "qemu/qemu-print.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/pv.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/tcg.h"
#endif

#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    cpu_inject_clock_comparator((S390CPU *) opaque);
}

void s390x_cpu_timer(void *opaque)
{
    cpu_inject_cpu_timer((S390CPU *) opaque);
}
#endif

#ifndef CONFIG_USER_ONLY

hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t tec;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    /* We want to read the code (e.g., see what we are single-stepping). */
    if (asc != PSW_ASC_HOME) {
        asc = PSW_ASC_PRIMARY;
    }

    /*
     * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
     * of MMU_INST_FETCH.
     */
    if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

static inline bool is_special_wait_psw(uint64_t psw_addr)
{
    /* signal quiesce */
    return (psw_addr & 0xfffUL) == 0xfffUL;
}

void s390_handle_wait(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
        if (is_special_wait_psw(cpu->env.psw.addr)) {
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        } else {
            cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
            qemu_system_guest_panicked(cpu_get_crash_info(cs));
        }
#endif
    }
}

/* Install a new PSW; under TCG also refresh cc_op, PER watchpoints and the wait state. */
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;

    /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
    if (!tcg_enabled()) {
        return;
    }
    env->cc_op = (mask >> 44) & 3;

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    if (mask & PSW_MASK_WAIT) {
        s390_handle_wait(env_archcpu(env));
    }
}

/* Return the PSW mask; under TCG the lazily computed condition code is merged into the CC field first. */
uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, true);

    if (len < sizeof(LowCore)) {
        cpu_abort(env_cpu(env), "Could not map lowcore\n");
    }

    return lowcore;
}

void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    env->pending_int &= ~INTERRUPT_RESTART;

    load_psw(env, mask, addr);
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all. */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled. */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts. */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints. */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);

    } else {
        /* Default case, create a single watchpoint. */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

typedef struct SigpSaveArea {
    uint64_t    fprs[16];                       /* 0x0000 */
    uint64_t    grs[16];                        /* 0x0080 */
    PSW         psw;                            /* 0x0100 */
    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
    uint32_t    prefix;                         /* 0x0118 */
    uint32_t    fpc;                            /* 0x011c */
    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
    uint32_t    todpr;                          /* 0x0124 */
    uint64_t    cputm;                          /* 0x0128 */
    uint64_t    ckc;                            /* 0x0130 */
    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
    uint32_t    ars[16];                        /* 0x0140 */
    uint64_t    crs[16];                        /* 0x0180 */
} SigpSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);

/* Store the CPU state into the 512-byte SIGP save area at @addr. */
int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    SigpSaveArea *sa;
    hwaddr len = sizeof(*sa);
    int i;

    /* For PVMs storing will occur when this cpu enters SIE again */
    if (s390_is_pv()) {
        return 0;
    }

    sa = cpu_physical_memory_map(addr, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(*sa)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
    }
    for (i = 0; i < 16; ++i) {
        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
    }
    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
    sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
    sa->prefix = cpu_to_be32(cpu->env.psa);
    sa->fpc = cpu_to_be32(cpu->env.fpc);
    sa->todpr = cpu_to_be32(cpu->env.todpr);
    sa->cputm = cpu_to_be64(cpu->env.cputm);
    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
    for (i = 0; i < 16; ++i) {
        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
    }
    for (i = 0; i < 16; ++i) {
        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);

    return 0;
}

typedef struct SigpAdtlSaveArea {
    uint64_t    vregs[32][2];                       /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];        /* 0x0200 */
    uint64_t    gscb[4];                            /* 0x0400 */
    uint8_t     pad_0x0420[0x1000 - 0x0420];        /* 0x0420 */
} SigpAdtlSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);

#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    SigpAdtlSaveArea *sa;
    hwaddr save = len;
    int i;

    sa = cpu_physical_memory_map(addr, &save, true);
    if (!sa) {
        return -EFAULT;
    }
    if (save != len) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (s390_has_feat(S390_FEAT_VECTOR)) {
        for (i = 0; i < 32; i++) {
            sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
            sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
        }
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
        for (i = 0; i < 4; i++) {
            sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
        }
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}
#endif /* CONFIG_USER_ONLY */

void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                     env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                     env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (flags & CPU_DUMP_FPU) {
        if (s390_has_feat(S390_FEAT_VECTOR)) {
            for (i = 0; i < 32; i++) {
                qemu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64 "%c",
                             i, env->vregs[i][0], env->vregs[i][1],
                             i % 2 ? '\n' : ' ');
            }
        } else {
            for (i = 0; i < 16; i++) {
                qemu_fprintf(f, "F%02d=%016" PRIx64 "%c",
                             i, *get_freg(env, i),
                             (i % 4) == 3 ? '\n' : ' ');
            }
        }
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        qemu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                     inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    qemu_fprintf(f, "\n");
}

const char *cc_name(enum cc_op cc_op)
{
    static const char * const cc_names[] = {
        [CC_OP_CONST0]    = "CC_OP_CONST0",
        [CC_OP_CONST1]    = "CC_OP_CONST1",
        [CC_OP_CONST2]    = "CC_OP_CONST2",
        [CC_OP_CONST3]    = "CC_OP_CONST3",
        [CC_OP_DYNAMIC]   = "CC_OP_DYNAMIC",
        [CC_OP_STATIC]    = "CC_OP_STATIC",
        [CC_OP_NZ]        = "CC_OP_NZ",
        [CC_OP_LTGT_32]   = "CC_OP_LTGT_32",
        [CC_OP_LTGT_64]   = "CC_OP_LTGT_64",
        [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
        [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
        [CC_OP_LTGT0_32]  = "CC_OP_LTGT0_32",
        [CC_OP_LTGT0_64]  = "CC_OP_LTGT0_64",
        [CC_OP_ADD_64]    = "CC_OP_ADD_64",
        [CC_OP_ADDU_64]   = "CC_OP_ADDU_64",
        [CC_OP_ADDC_64]   = "CC_OP_ADDC_64",
        [CC_OP_SUB_64]    = "CC_OP_SUB_64",
        [CC_OP_SUBU_64]   = "CC_OP_SUBU_64",
        [CC_OP_SUBB_64]   = "CC_OP_SUBB_64",
        [CC_OP_ABS_64]    = "CC_OP_ABS_64",
        [CC_OP_NABS_64]   = "CC_OP_NABS_64",
        [CC_OP_ADD_32]    = "CC_OP_ADD_32",
        [CC_OP_ADDU_32]   = "CC_OP_ADDU_32",
        [CC_OP_ADDC_32]   = "CC_OP_ADDC_32",
        [CC_OP_SUB_32]    = "CC_OP_SUB_32",
        [CC_OP_SUBU_32]   = "CC_OP_SUBU_32",
        [CC_OP_SUBB_32]   = "CC_OP_SUBB_32",
        [CC_OP_ABS_32]    = "CC_OP_ABS_32",
        [CC_OP_NABS_32]   = "CC_OP_NABS_32",
        [CC_OP_COMP_32]   = "CC_OP_COMP_32",
        [CC_OP_COMP_64]   = "CC_OP_COMP_64",
        [CC_OP_TM_32]     = "CC_OP_TM_32",
        [CC_OP_TM_64]     = "CC_OP_TM_64",
        [CC_OP_NZ_F32]    = "CC_OP_NZ_F32",
        [CC_OP_NZ_F64]    = "CC_OP_NZ_F64",
        [CC_OP_NZ_F128]   = "CC_OP_NZ_F128",
        [CC_OP_ICM]       = "CC_OP_ICM",
        [CC_OP_SLA_32]    = "CC_OP_SLA_32",
        [CC_OP_SLA_64]    = "CC_OP_SLA_64",
        [CC_OP_FLOGR]     = "CC_OP_FLOGR",
        [CC_OP_LCBB]      = "CC_OP_LCBB",
        [CC_OP_VC]        = "CC_OP_VC",
    };

    return cc_names[cc_op];
}