/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a mutex protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
			   bool report_status)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (report_status)
		return state->asserted;

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, thus concurrent access is undefined
	 * to begin with.
	 */
	if (level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return state->asserted;
}

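/*
 * Walk all sources on this ICS and re-attempt delivery of any that
 * have their resend flag set.  The ICS mutex is dropped around each
 * icp_deliver_irq() call because delivery takes the ICS lock itself.
 */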
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	mutex_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

		mutex_unlock(&ics->lock);
		icp_deliver_irq(xics, icp, state->number);
		mutex_lock(&ics->lock);
	}

	mutex_unlock(&ics->lock);
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;

	mutex_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	mutex_unlock(&ics->lock);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	mutex_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	mutex_unlock(&ics->lock);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

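/*
 * Attempt to atomically update the ICP state from @old to @new with a
 * single cmpxchg on the raw 64-bit value, recomputing the interrupt
 * output (out_ee) from the new XISR/priority/CPPR first.  On success,
 * raise the external interrupt on the target vcpu if the new state
 * calls for it, kicking the vcpu unless we are updating our own ICP.
 * Returns false if the state changed under us and the caller needs to
 * recompute and retry.
 */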
static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

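/*
 * Try to record @irq (at @priority) as the pending interrupt of the
 * target ICP.  Returns true if the interrupt is now pending there; if
 * a previously pending interrupt was displaced in the process, its
 * source number is returned in *reject so the caller can re-deliver
 * it.  On failure the need_resend flag is set in the ICP state.
 */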
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	mutex_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics mutex.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			mutex_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			mutex_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	mutex_unlock(&ics->lock);
}

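/*
 * Reminder on XICS priorities: numerically smaller values are more
 * favored and 0xff (MASKED) is the least favored.  "Down_CPPR" thus
 * corresponds to a numerically larger CPPR: the processor priority
 * drops and it becomes more interruptible, which is why pending IPIs
 * and resends are checked here.
 */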
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

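/*
 * H_XIRR: the guest accepts the highest-priority pending interrupt.
 * The returned XIRR packs the pre-accept CPPR in the top byte and the
 * interrupt source number (XISR) in the low 24 bits; accepting loads
 * CPPR with the pending priority and clears the XISR.
 */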
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it as in
	 *
	 * ICP state: Check_IPI
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri)
				reject = new_state.xisr;
			new_state.pending_pri = mfrr;
			new_state.xisr = XICS_IPI;
		}

		if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

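/*
 * H_IPOLL: return the target server's XIRR (CPPR in the top byte,
 * XISR in the low 24 bits) in r4 and its MFRR in r5, without
 * accepting anything or modifying any ICP state.
 */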
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = ACCESS_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}

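/*
 * H_EOI: the passed-in XIRR carries the CPPR to restore in its top
 * byte and the interrupt source being EOI'd in the low 24 bits.
 */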
static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU)
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	if (icp->rm_action & XICS_RM_CHECK_RESEND)
		icp_check_resend(xics, icp);
	if (icp->rm_action & XICS_RM_REJECT)
		icp_deliver_irq(xics, icp, icp->rm_reject);

	icp->rm_action = 0;

	return H_SUCCESS;
}

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Initialisation code etc. -- */

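/* Dump the ICP state of every vcpu and all ICS source state to debugfs. */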
static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = ACCESS_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
	}

	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		mutex_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);

		}
		mutex_unlock(&ics->lock);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	mutex_init(&ics->lock);
	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

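/*
 * Pack the ICP state into the 64-bit ONE_REG word (KVM_REG_PPC_ICP_*
 * layout): CPPR, XISR, MFRR and the pending priority each live at
 * their respective KVM_REG_PPC_ICP_*_SHIFT offsets.
 */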
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = ACCESS_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

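/*
 * Per-source state for the KVM_DEV_XICS_GRP_SOURCES device attribute
 * group: attr->attr is the global interrupt number and attr->addr
 * points to a u64 carrying the destination server number
 * (KVM_XICS_DESTINATION_MASK), the priority at KVM_XICS_PRIORITY_SHIFT
 * and the KVM_XICS_MASKED / KVM_XICS_PENDING / KVM_XICS_LEVEL_SENSITIVE
 * flags.
 *
 * For illustration only, a userspace save/restore of one source might
 * look roughly like this (sketch; "xics_fd", "irq" and "state" are
 * illustrative names, the device fd comes from KVM_CREATE_DEVICE):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq,
 *		.addr  = (__u64)(unsigned long)&state,
 *	};
 *	ioctl(xics_fd, KVM_GET_DEVICE_ATTR, &attr);	(save)
 *	ioctl(xics_fd, KVM_SET_DEVICE_ATTR, &attr);	(restore)
 */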
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	mutex_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->asserted)
			val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
		else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;
		ret = 0;
	}
	mutex_unlock(&ics->lock);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	mutex_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->asserted = 0;
	if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
		irqp->asserted = 1;
	irqp->exists = 1;
	mutex_unlock(&ics->lock);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number);

	return 0;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	return ics_deliver_irq(xics, irq, level, line_status);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

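/*
 * Device destroy callback: detach the emulation from the VM and free
 * every ICS that was created, along with the device itself.
 */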
static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	mutex_lock(&kvm->lock);
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;
	mutex_unlock(&kvm->lock);

	if (ret)
		return ret;

	xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}