// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/*
	 * Take other values the same as 1, consistent with original code.
	 * maybe WARN here?
	 */

	if (!state->lsi && level == 0) /* noop for MSI */
		return 0;

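	/*
	 * Update the source's P/Q bits atomically.  P (PQ_PRESENTED) means
	 * the interrupt has been presented to an ICP, Q (PQ_QUEUED) means a
	 * further assertion arrived while it was still presented.  For an
	 * LSI we only track the level in P; for an MSI the old P bit is
	 * shifted into Q and P is set, so a second edge is remembered and
	 * replayed at EOI time (see ics_eoi()).
	 */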
	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting already set LSI ... */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0, this is the only case where we present */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

/*
 * Update the server and priority of a source under the ICS lock and
 * report whether a masked-pending or rejected interrupt now needs to be
 * (re)delivered at the new priority.
 */
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

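/*
 * Mask a source by writing the MASKED priority while remembering the
 * current priority in saved_priority, so that kvmppc_xics_int_on() can
 * restore it later.  An interrupt that fires while masked is latched in
 * masked_pending and delivered on unmask.
 */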
int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

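/*
 * Attempt to present irq at the given priority to this ICP in a single
 * atomic transaction.  On success, *reject holds any less favoured XISR
 * that was displaced and must be re-delivered; on failure, the ICP's
 * need_resend flag is latched so a later CPPR change triggers a resend.
 */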
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = false;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = false;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

/* H_IPOLL: return the addressed ICP's XIRR and MFRR without accepting anything. */
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: For LSI, if P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}

int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Initialisation code etc. -- */

static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xics_debug);

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

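/*
 * Allocate the per-vCPU interrupt presentation controller (ICP) for the
 * given server number.  MFRR and the pending priority start out at the
 * least favoured value (MASKED), so nothing is pending until the guest
 * programs the ICP.
 */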
static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

/* Pack the ICP state into the u64 layout used by the KVM_REG_PPC_ICP_* one_reg encoding. */
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

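/*
 * KVM_DEV_XICS_GRP_SOURCES attribute accessors.  Each source is exposed to
 * userspace as a u64: the destination server in the low bits
 * (KVM_XICS_DESTINATION_MASK), the priority at KVM_XICS_PRIORITY_SHIFT, and
 * the KVM_XICS_MASKED/LEVEL_SENSITIVE/PENDING/PRESENTED/QUEUED state flags.
 */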
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;
	/* If PENDING, set P in case P is not saved because of old code */
	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}

int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

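/*
 * Device attribute handlers.  Only KVM_DEV_XICS_GRP_SOURCES is supported;
 * the attribute number is the global interrupt number of the source.
 */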
static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xics_release(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;

	pr_devel("Releasing xics device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device. Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xics->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that execution is
		 * excluded for the vcpu until the ICP was freed. When the vcpu
		 * can execute again, vcpu->arch.icp and vcpu->arch.irq_type
		 * have been cleared and the vcpu will not be going into the
		 * XICS code anymore.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xics_free_icp(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++) {
		kfree(xics->ics[i]);
		xics->ics[i] = NULL;
	}
	/*
	 * A reference of the kvmppc_xics pointer is now kept under
	 * the xics_device pointer of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}

static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm)
{
	struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device;
	struct kvmppc_xics *xics = *kvm_xics_device;

	if (!xics) {
		xics = kzalloc(sizeof(*xics), GFP_KERNEL);
		*kvm_xics_device = xics;
	} else {
		memset(xics, 0, sizeof(*xics));
	}

	return xics;
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xics for partition\n");

	/* Already there ? */
	if (kvm->arch.xics)
		return -EEXIST;

	xics = kvmppc_xics_get_device(kvm);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;
	kvm->arch.xics = xics;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
	    cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.release = kvmppc_xics_release,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);