/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int cpuflags;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EMERGENCY,
		.parm = vcpu->vcpu_id,
	};
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc = 0;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
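	/*
	 * Restating the condition below: the signal is delivered unless
	 * the target is stopped, enabled for both I/O and external
	 * interrupts, and is either waiting with a PSW address of zero
	 * or not waiting while the ASN parameter matches neither its
	 * primary nor its secondary ASN; only in those two cases is
	 * "incorrect state" stored instead.
	 */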
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.parm = vcpu->vcpu_id,
	};
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

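/*
 * Queue a SIGP STOP interrupt for the target vcpu. As coded here,
 * SIGP_CC_BUSY is returned while an earlier SIGP STOP is still pending,
 * and -ESHUTDOWN when the vcpu is already stopped but ACTION_STORE_ON_STOP
 * was requested, so that the caller can still perform the status store.
 */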
static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP) {
		/* another SIGP STOP is pending */
		kfree(inti);
		rc = SIGP_CC_BUSY;
		goto out;
	}
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	li->action_bits |= action;
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	kvm_s390_vcpu_wakeup(dst_vcpu);
out:
	spin_unlock(&li->lock);

	return rc;
}

static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = __inject_sigp_stop(dst_vcpu, action);

	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are
	 * always at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	kvm_s390_vcpu_wakeup(dst_vcpu);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock(&li->lock);
	return rc;
}

static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
				       u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

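/*
 * SIGP is an RS-format instruction: the order code is taken from the
 * second-operand address (base + displacement), the cpu address from r3,
 * and the parameter from the odd register of the r1 pair (r1 itself when
 * r1 is odd, r1 + 1 otherwise). Orders that store status do so into r1.
 */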
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;	/* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

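	/*
	 * A negative rc is an error or a request to let user space handle
	 * the order; anything else is a SIGP condition code that is
	 * reported to the guest via the PSW.
	 */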
	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}