/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int rc;
	int ext_call_pending;

	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (ext_call_pending)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
	}

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
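/*
 * The handlers above share one status convention: when an order cannot be
 * accepted, the upper word of the caller's r1 register is preserved and
 * SIGP_STATUS_* bits are ORed into the lower word before returning
 * SIGP_CC_STATUS_STORED. A minimal sketch of that pattern, kept out of the
 * build on purpose (the helper name is illustrative, not part of this file):
 */
#if 0
static int __sigp_status_stored(u64 *reg, u32 status_bits)
{
	*reg &= 0xffffffff00000000UL;	/* keep the guest's upper word */
	*reg |= status_bits;		/* e.g. SIGP_STATUS_STOPPED */
	return SIGP_CC_STATUS_STORED;	/* condition code 1 */
}
#endif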
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
	int rc;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	return rc;
}
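/*
 * Note how -EBUSY from kvm_s390_inject_vcpu() is mapped per order in the
 * handlers above: the STOP variants turn it into SIGP_CC_BUSY, while
 * EXTERNAL CALL and SET PREFIX store a status instead (EXT_CALL_PENDING
 * resp. INCORRECT_STATE) and return SIGP_CC_STATUS_STORED.
 */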
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int flags;
	int rc;

	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (!test_kvm_facility(vcpu->kvm, 9)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_ORDER;
		return SIGP_CC_STATUS_STORED;
	}

	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}

static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}

static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}
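/*
 * Return value convention used throughout this file: a negative errno
 * (-EOPNOTSUPP in particular) makes kvm_s390_handle_sigp() below bail out
 * to user space, while a non-negative value is a SIGP condition code (0-3)
 * that is written to the guest PSW via kvm_s390_set_psw_cc().
 */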
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
					   u16 cpu_addr)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}

int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
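/*
 * Operand decoding above follows the RS format of SIGP (sigp r1,r3,d2(b2)):
 * the CPU address comes from r3, the order code from the second-operand
 * address, and the 32-bit parameter from the odd register of the r1
 * even/odd pair; status, if any, is stored back into r1. A rough sketch of
 * the guest-side issuing sequence, modeled on the ____pcpu_sigp() helper in
 * asm/sigp.h (kept out of the build; exact asm constraints are an
 * assumption, not part of this file):
 */
#if 0
static inline int sigp_example(u16 addr, u8 order, unsigned long parm,
			       u32 *status)
{
	register unsigned long reg1 asm ("1") = parm;	/* parameter/status */
	int cc;

	asm volatile(
		"	sigp	%1,%2,0(%3)\n"	/* order forms the 2nd-operand address */
		"	ipm	%0\n"		/* fetch the condition code */
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	*status = reg1;				/* lower word holds SIGP_STATUS_* */
	return cc;
}
#endif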
/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}
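/*
 * Orders punted above with -EOPNOTSUPP reach user space as a
 * KVM_EXIT_S390_SIEIC exit; the monitor can re-derive r1/r3 from the
 * intercepted instruction text the same way kvm_s390_handle_sigp() does.
 * A rough user-space sketch (the handler itself is hypothetical; field
 * names are from the kvm_run ABI in <linux/kvm.h>):
 */
#if 0
void handle_sigp_exit(struct kvm_run *run, __u64 *gprs)
{
	__u16 ipa = run->s390_sieic.ipa;	/* first two instruction bytes */
	int r1 = (ipa & 0x00f0) >> 4;
	int r3 = ipa & 0x000f;
	__u16 cpu_addr = gprs[r3];

	/* emulate the order, set the condition code, resume the vcpu */
}
#endif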