// SPDX-License-Identifier: GPL-2.0
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED);
	int rc;
	int ext_call_pending;

	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!stopped && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (ext_call_pending)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (stopped)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	bool idle;

	idle = is_vcpu_idle(vcpu);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!is_vcpu_stopped(vcpu)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || (idle && psw->addr != 0)
	    || (!idle && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
	}

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
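
/*
 * Note on the *reg handling in the helpers above and below: when an
 * order completes with CC 1 (status stored), the architecture stores a
 * 32-bit status word into bit positions 32-63 of the general register
 * designated by R1 and leaves bits 0-31 unchanged. Hence the recurring
 * pattern of masking with 0xffffffff00000000UL before ORing the
 * SIGP_STATUS_* flags into the lower half.
 */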

static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
			   u64 *status_reg)
{
	unsigned int i;
	struct kvm_vcpu *v;
	bool all_stopped = true;

	kvm_for_each_vcpu(i, v, vcpu->kvm) {
		if (v == vcpu)
			continue;
		if (!is_vcpu_stopped(v))
			all_stopped = false;
	}

	*status_reg &= 0xffffffff00000000UL;

	/* Reject set arch order, with czam we're always in z/Arch mode. */
	*status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER :
			SIGP_STATUS_INCORRECT_STATE);
	return SIGP_CC_STATUS_STORED;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
	int rc;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	return rc;
}
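
/*
 * Note on the -EBUSY handling above (an inference from the interrupt
 * injection code, not spelled out in this file): the architecture
 * requires the addressed CPU to be stopped for SET PREFIX, and the
 * injection is expected to fail with -EBUSY when it is not, so the
 * order is reported back to the guest as "incorrect state".
 */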

static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int rc;

	if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	int rc;

	if (!test_kvm_facility(vcpu->kvm, 9)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_ORDER;
		return SIGP_CC_STATUS_STORED;
	}

	if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}

static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}

static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}
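
/*
 * Return value convention of handle_sigp_dst() and the __sigp_* helpers
 * above: a non-negative value is a SIGP condition code (SIGP_CC_*) that
 * is delivered to the guest's PSW, while -EOPNOTSUPP means the order is
 * not handled in the kernel and the intercept is forwarded to user
 * space instead.
 */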

static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
					   u16 cpu_addr)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}

int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter,
				     &vcpu->run->s.regs.gprs[r1]);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
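
/*
 * Illustrative sketch of the guest side of the orders handled above,
 * loosely modelled on the ____pcpu_sigp() helper in
 * arch/s390/include/asm/sigp.h (the union register_pair type and the
 * exact asm constraints are assumptions, not taken from this file).
 * The CPU address is passed in the register designated by R3, the
 * parameter in the odd register of the R1 pair, the order code as the
 * second-operand address, and a stored status word comes back in the
 * even register. The returned value is the SIGP condition code (0-3):
 *
 *	static inline int sigp(u16 addr, u8 order, unsigned long parm,
 *			       u32 *status)
 *	{
 *		union register_pair r1 = { .odd = parm, };
 *		int cc;
 *
 *		asm volatile(
 *			"	sigp	%[r1],%[addr],0(%[order])\n"
 *			"	ipm	%[cc]\n"
 *			"	srl	%[cc],28\n"
 *			: [cc] "=&d" (cc), [r1] "+&d" (r1.pair)
 *			: [addr] "d" (addr), [order] "a" (order)
 *			: "cc");
 *		*status = r1.even;
 *		return cc;
 *	}
 */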

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}
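
/*
 * Note: with KVM_CAP_S390_USER_SIGP enabled (vcpu->kvm->arch.user_sigp
 * above), all orders except SENSE, SENSE RUNNING and the EXTERNAL
 * CALL/EMERGENCY SIGNAL family are forwarded, and the VMM (e.g. QEMU)
 * is responsible for completing the order and setting the condition
 * code. Without it, only (RE)START, the resets and unknown orders leave
 * the kernel.
 */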