/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a mutex protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
			   bool report_status)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (report_status)
		return state->asserted;

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, thus concurrent access is undefined
	 * to begin with.
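	 * KVM_INTERRUPT_SET_LEVEL asserts the source (and a delivery
	 * attempt follows below); KVM_INTERRUPT_UNSET merely clears it.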
	 */
	if (level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return state->asserted;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	mutex_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

		mutex_unlock(&ics->lock);
		icp_deliver_irq(xics, icp, state->number);
		mutex_lock(&ics->lock);
	}

	mutex_unlock(&ics->lock);
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;

	mutex_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	mutex_unlock(&ics->lock);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	mutex_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	mutex_unlock(&ics->lock);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
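			 * The flag is latched and cleared again when the
			 * resend is actually processed, in icp_down_cppr()
			 * or kvmppc_h_ipi().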
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	mutex_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * We don't differentiate between normal deliveries and
	 * resends, so this implementation will differ from PAPR
	 * and not lose such interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ICS mutex.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
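		 * If so, loop back and deliver the rejected interrupt to
		 * its (possibly different) target server instead.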
		 */
		if (reject && reject != XICS_IPI) {
			mutex_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt; we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			mutex_unlock(&ics->lock);
			goto again;
		}
	}
out:
	mutex_unlock(&ics->lock);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
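	 * That resend is handled after the atomic update below, once
	 * need_resend has been latched and cleared.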
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it as in
	 *
	 * ICP state: Check_IPI
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri)
				reject = new_state.xisr;
			new_state.pending_pri = mfrr;
			new_state.xisr = XICS_IPI;
		}

		if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = ACCESS_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
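	 * A rejected IPI needs no redelivery here: it is still
	 * reflected in the MFRR and will be reloaded into the XISR
	 * when the CPPR is lowered again (Down_CPPR).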
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU)
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	if (icp->rm_action & XICS_RM_CHECK_RESEND)
		icp_check_resend(xics, icp);
	if (icp->rm_action & XICS_RM_REJECT)
		icp_deliver_irq(xics, icp, icp->rm_reject);

	icp->rm_action = 0;

	return H_SUCCESS;
}

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode)
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}


/* -- Initialisation code etc. -- */

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = ACCESS_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
	}

	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		mutex_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);

		}
		mutex_unlock(&ics->lock);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	mutex_init(&ics->lock);
	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
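	 * (If the ICS states are restored afterwards, a pending source
	 * triggers a fresh delivery attempt from xics_set_source() at
	 * that point.)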
	 */
	do {
		old_state = ACCESS_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	mutex_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->asserted)
			val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
		else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;
		ret = 0;
	}
	mutex_unlock(&ics->lock);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	mutex_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->asserted = 0;
	if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
		irqp->asserted = 1;
	irqp->exists = 1;
	mutex_unlock(&ics->lock);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number);

	return 0;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	return ics_deliver_irq(xics, irq, level, line_status);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	mutex_lock(&kvm->lock);
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;
	mutex_unlock(&kvm->lock);

	if (ret)
		return ret;

	xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_64_HV
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

	return 0;
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}