/*
 * s390x SIGP instruction handling
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "sysemu/tcg.h"
#include "trace.h"
#include "qapi/qapi-types-machine.h"

QemuMutex qemu_sigp_mutex;

typedef struct SigpInfo {
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}

static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si)
{
    uint8_t state = s390_cpu_get_state(dst_cpu);
    bool ext_call = dst_cpu->env.pending_int & INTERRUPT_EXTERNAL_CALL;
    uint64_t status = 0;

    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* sensing without locks is racy, but it's the same for real hw */
    if (state != S390_CPU_STATE_STOPPED && !ext_call) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
    } else {
        if (ext_call) {
            status |= SIGP_STAT_EXT_CALL_PENDING;
        }
        if (state == S390_CPU_STATE_STOPPED) {
            status |= SIGP_STAT_STOPPED;
        }
        set_sigp_status(si, status);
    }
}

static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
{
    int ret;

    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    ret = cpu_inject_external_call(dst_cpu, src_cpu->env.core_id);
    if (!ret) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
    } else {
        set_sigp_status(si, SIGP_STAT_EXT_CALL_PENDING);
    }
}

static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
{
    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_start(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (cs->halted) {
        s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
    } else {
        /* execute the stop function */
        cpu->env.sigp_order = SIGP_STOP;
        cpu_inject_stop(cpu);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
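
/*
 * SIGP STOP AND STORE STATUS: like sigp_stop(), but additionally stores the
 * CPU status at the default status address (S390_STORE_STATUS_DEF_ADDR).
 * A CPU halted in a disabled wait can be stopped immediately; an operating
 * CPU gets a STOP interrupt injected and the store is done once that
 * interrupt is handled.
 */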
static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(cpu) == S390_CPU_STATE_OPERATING && cs->halted) {
        s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
    }

    switch (s390_cpu_get_state(cpu)) {
    case S390_CPU_STATE_OPERATING:
        cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        cpu_inject_stop(cpu);
        /* store will be performed in do_stop_interrupt() */
        break;
    case S390_CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(cs);
        s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(cs);

    if (s390_store_status(cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

#define ADTL_SAVE_LC_MASK 0xfUL
static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint8_t lc = si->param & ADTL_SAVE_LC_MASK;
    hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK;
    hwaddr len = 1UL << (lc ? lc : 10);

    if (!s390_has_feat(S390_FEAT_VECTOR) &&
        !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    /* address must be aligned to length */
    if (addr & (len - 1)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* no GS: only lc == 0 is valid */
    if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
        lc != 0) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* GS: 0, 10, 11, 12 are valid */
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
        lc != 0 &&
        lc != 10 &&
        lc != 11 &&
        lc != 12) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    cpu_synchronize_state(cs);

    if (s390_store_adtl_status(cpu, addr, len)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
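
/*
 * SIGP RESTART: deliver a restart interrupt to the target CPU. A stopped CPU
 * is moved to OPERATING first and the restart PSW is loaded right away; an
 * already operating CPU only gets the restart interrupt made pending, to be
 * delivered by the normal interrupt machinery.
 */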
static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    switch (s390_cpu_get_state(cpu)) {
    case S390_CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(cs);
        /*
         * Set OPERATING (and unhalting) before loading the restart PSW.
         * s390_cpu_set_psw() will then properly halt the CPU again if
         * necessary (TCG).
         */
        s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
        do_restart_interrupt(&cpu->env);
        break;
    case S390_CPU_STATE_OPERATING:
        cpu_inject_restart(cpu);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->reset(cs, S390_CPU_RESET_INITIAL);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->reset(cs, S390_CPU_RESET_NORMAL);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(cs);

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false,
                                    MEMTXATTRS_UNSPECIFIED)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu->env.psa = addr;
    tlb_flush(cs);
    cpu_synchronize_post_init(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu,
                                SigpInfo *si)
{
    const uint64_t psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
    uint16_t p_asn, s_asn, asn;
    uint64_t psw_addr, psw_mask;
    bool idle;

    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* this looks racy, but these values are only used when STOPPED */
    idle = CPU(dst_cpu)->halted;
    psw_addr = dst_cpu->env.psw.addr;
    psw_mask = dst_cpu->env.psw.mask;
    asn = si->param;
    p_asn = dst_cpu->env.cregs[4] & 0xffff;  /* Primary ASN */
    s_asn = dst_cpu->env.cregs[3] & 0xffff;  /* Secondary ASN */

    if (s390_cpu_get_state(dst_cpu) != S390_CPU_STATE_STOPPED ||
        (psw_mask & psw_int_mask) != psw_int_mask ||
        (idle && psw_addr != 0) ||
        (!idle && (asn == p_asn || asn == s_asn))) {
        cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
    } else {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
    }

    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
{
    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* sensing without locks is racy, but it's the same for real hw */
    if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) {
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* If halted (which includes also STOPPED), it is not running */
    if (CPU(dst_cpu)->halted) {
        set_sigp_status(si, SIGP_STAT_NOT_RUNNING);
    } else {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
    }
}
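
/*
 * Dispatch a SIGP order that targets a single destination CPU. Orders that
 * change the destination's run state are routed through run_on_cpu() so they
 * execute in the context of the target vcpu; the remaining orders only
 * inspect state or inject interrupts and are handled directly. Returns the
 * resulting SIGP condition code.
 */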
static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_SENSE:
        sigp_sense(dst_cpu, &si);
        break;
    case SIGP_EXTERNAL_CALL:
        sigp_external_call(cpu, dst_cpu, &si);
        break;
    case SIGP_EMERGENCY:
        sigp_emergency(cpu, dst_cpu, &si);
        break;
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_ADTL_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_COND_EMERGENCY:
        sigp_cond_emergency(cpu, dst_cpu, &si);
        break;
    case SIGP_SENSE_RUNNING:
        sigp_sense_running(dst_cpu, &si);
        break;
    default:
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}

static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    *status_reg &= 0xffffffff00000000ULL;

    /* Reject set arch order, with czam we're always in z/Arch mode. */
    *status_reg |= SIGP_STAT_INVALID_PARAMETER;
    return SIGP_CC_STATUS_STORED;
}
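
/*
 * Entry point for a SIGP order issued by the guest. Only one order is
 * processed at a time: if qemu_sigp_mutex is already taken, SIGP_CC_BUSY is
 * returned right away. The status is written back to general register r1,
 * the parameter is taken from the odd register of the r1 pair, and the
 * destination CPU address is read from general register r3.
 */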
int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3)
{
    uint64_t *status_reg = &env->regs[r1];
    uint64_t param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
    S390CPU *cpu = env_archcpu(env);
    S390CPU *dst_cpu = NULL;
    int ret;

    if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
        ret = SIGP_CC_BUSY;
        goto out;
    }

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg);
    }
    qemu_mutex_unlock(&qemu_sigp_mutex);

out:
    trace_sigp_finished(order, CPU(cpu)->cpu_index,
                        dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
    g_assert(ret >= 0);

    return ret;
}

int s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {};

    run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
    return 0;
}

void do_stop_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = env_archcpu(env);

    if (s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu) == 0) {
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
    }
    if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
        s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
    }
    env->sigp_order = 0;
    env->pending_int &= ~INTERRUPT_STOP;
}

void s390_init_sigp(void)
{
    qemu_mutex_init(&qemu_sigp_mutex);
}