// SPDX-License-Identifier: GPL-2.0
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED);
	int rc;
	int ext_call_pending;

	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!stopped && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (ext_call_pending)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (stopped)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	bool idle;

	idle = is_vcpu_idle(vcpu);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!is_vcpu_stopped(vcpu)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || (idle && psw->addr != 0)
	    || (!idle && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
	}

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
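
/*
 * Convention used by the SIGP helpers in this file whenever status has to
 * be reported (constants from <asm/sigp.h>; the condition codes are
 * SIGP_CC_ORDER_CODE_ACCEPTED = 0, SIGP_CC_STATUS_STORED = 1,
 * SIGP_CC_BUSY = 2 and SIGP_CC_NOT_OPERATIONAL = 3): only the low 32 bits
 * of the guest's status register are replaced, the high half is preserved.
 * For example, SENSE on a stopped CPU with an external call pending
 * effectively does
 *
 *	*reg = (*reg & 0xffffffff00000000UL) | SIGP_STATUS_EXT_CALL_PENDING |
 *	       SIGP_STATUS_STOPPED;
 *
 * and returns SIGP_CC_STATUS_STORED (CC 1).
 */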

static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
			   u64 *status_reg)
{
	*status_reg &= 0xffffffff00000000UL;

	/* Reject set arch order, with czam we're always in z/Arch mode. */
	*status_reg |= SIGP_STATUS_INVALID_PARAMETER;
	return SIGP_CC_STATUS_STORED;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
	int rc;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	return rc;
}

static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int rc;

	if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	int rc;

	if (!test_kvm_facility(vcpu->kvm, 9)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_ORDER;
		return SIGP_CC_STATUS_STORED;
	}

	if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}
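
/*
 * The __prepare_* helpers below do not emulate their orders in the kernel.
 * Returning -EOPNOTSUPP propagates through handle_sigp_dst() and
 * kvm_s390_handle_sigp() (which returns negative errors as-is), so the
 * interception is left to user space, which is expected to carry out the
 * actual (RE)START, CPU RESET or unknown-order processing.
 */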

static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}

static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}

static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
					   u16 cpu_addr)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}
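
/*
 * SIGP is an RS-format instruction (SIGP R1,R3,D2(B2)). The register
 * numbers are taken from the second byte of the instruction halfword in
 * ipa, the target CPU address from R3 and the order code from the
 * second-operand address (base register + displacement). The parameter is
 * read from the odd register of the R1 pair, and status, when a helper
 * stores any, goes into gprs[r1].
 */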

int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter,
				     &vcpu->run->s.regs.gprs[r1]);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}