// SPDX-License-Identifier: GPL-2.0
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int rc;
	int ext_call_pending;

	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (ext_call_pending)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	bool idle;

	idle = is_vcpu_idle(vcpu);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!is_vcpu_stopped(vcpu)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || (idle && psw->addr != 0)
	    || (!idle && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
	}

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
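/*
 * The two STOP variants below deliver the order as a local interrupt to
 * the target vcpu. An -EBUSY result from kvm_s390_inject_vcpu() (a stop
 * request is already pending on the target) is translated into condition
 * code 2 (SIGP_CC_BUSY); 0 means the order was accepted.
 */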
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
			   u64 *status_reg)
{
	unsigned int i;
	struct kvm_vcpu *v;
	bool all_stopped = true;

	kvm_for_each_vcpu(i, v, vcpu->kvm) {
		if (v == vcpu)
			continue;
		if (!is_vcpu_stopped(v))
			all_stopped = false;
	}

	*status_reg &= 0xffffffff00000000UL;

	/* Reject set arch order, with czam we're always in z/Arch mode. */
	*status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER :
					SIGP_STATUS_INCORRECT_STATE);
	return SIGP_CC_STATUS_STORED;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
	int rc;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are
	 * always at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	return rc;
}
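/*
 * STORE STATUS AT ADDRESS is only accepted while the target vcpu is
 * stopped; otherwise "incorrect state" is stored and condition code 1 is
 * returned. The parameter is truncated to a 31-bit address and aligned
 * down to a 512-byte boundary before the status is written by
 * kvm_s390_store_status_unloaded(); an unmapped address is reported as
 * "invalid parameter".
 */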
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int flags;
	int rc;

	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (!test_kvm_facility(vcpu->kvm, 9)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_ORDER;
		return SIGP_CC_STATUS_STORED;
	}

	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}

static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}
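/*
 * Dispatch an order to the vcpu designated by cpu_addr. A CPU address
 * that does not resolve to a vcpu yields condition code 3 (not
 * operational). Orders that must be completed in user space ((RE)START,
 * the CPU resets and all unknown orders) return -EOPNOTSUPP so the exit
 * is forwarded; START and RESTART instead report busy while a stop
 * interrupt is still pending on the target.
 */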
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}

static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu,
					   u8 order_code, u16 cpu_addr)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}
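/*
 * SIGP is an RS-format instruction: R1 and R3 are parsed out of the
 * intercepted instruction text (sie_block->ipa), the target CPU address
 * is taken from the register designated by R3, and the order code comes
 * from the second-operand address (base + displacement). The parameter
 * lives in the odd register of the pair designated by R1 (gprs[r1] when
 * r1 is odd, gprs[r1 + 1] otherwise), and any status is stored back into
 * gprs[r1]. The resulting condition code is set in the guest PSW;
 * negative rc values are returned so the caller can forward the exit.
 */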
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter,
				     &vcpu->run->s.regs.gprs[r1]);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}