Lines Matching +full:0 +full:xc
39 static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc) in xive_vm_ack_pending() argument
63 cppr = ack & 0xff; in xive_vm_ack_pending()
65 xc->pending |= 1 << cppr; in xive_vm_ack_pending()
68 if (cppr >= xc->hw_cppr) in xive_vm_ack_pending()
70 smp_processor_id(), cppr, xc->hw_cppr); in xive_vm_ack_pending()
74 * xc->cppr, this will be done as we scan for interrupts in xive_vm_ack_pending()
77 xc->hw_cppr = cppr; in xive_vm_ack_pending()
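
The xive_vm_ack_pending() lines above acknowledge the OS interrupt through the TIMA: the low byte of the ack word is the acknowledged CPPR, it is folded into the vCPU's pending-priority bitmap, and hw_cppr is updated to mirror what the hardware now masks. A minimal userspace model of that bookkeeping (the struct and helper names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-vCPU fields touched by the matched lines;
 * not the kernel's struct kvmppc_xive_vcpu. */
struct vcpu_model {
	uint8_t pending;	/* one bit per priority 0..7 */
	uint8_t cppr;		/* current processor priority, lower = more favored */
	uint8_t hw_cppr;	/* last priority pushed to the HW context */
};

/* Model of the ack path: the low byte of the TIMA ack word is the acked CPPR. */
static void model_ack_pending(struct vcpu_model *vc, uint16_t ack)
{
	uint8_t cppr = ack & 0xff;

	vc->pending |= 1u << cppr;	/* remember something fired at this priority */
	if (cppr >= vc->hw_cppr)	/* HW should only ack strictly more favored prios */
		fprintf(stderr, "unexpected ack: cppr=%u hw_cppr=%u\n",
			(unsigned)cppr, (unsigned)vc->hw_cppr);
	vc->hw_cppr = cppr;		/* HW has now masked up to this priority */
}

int main(void)
{
	struct vcpu_model vc = { .pending = 0, .cppr = 0xff, .hw_cppr = 0xff };

	model_ack_pending(&vc, 0x0005);
	printf("pending=0x%02x hw_cppr=%u\n", (unsigned)vc.pending, (unsigned)vc.hw_cppr);
	return 0;	/* prints pending=0x20 hw_cppr=5 */
}
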
99 __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); in xive_vm_source_eoi()
123 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_source_eoi()
133 static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc, in xive_vm_scan_interrupts() argument
136 u32 hirq = 0; in xive_vm_scan_interrupts()
137 u8 prio = 0xff; in xive_vm_scan_interrupts()
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { in xive_vm_scan_interrupts()
146 * If pending is 0 this will return 0xff which is what in xive_vm_scan_interrupts()
152 if (prio >= xc->cppr || prio > 7) { in xive_vm_scan_interrupts()
153 if (xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
154 prio = xc->mfrr; in xive_vm_scan_interrupts()
161 q = &xc->queues[prio]; in xive_vm_scan_interrupts()
175 * Try to fetch from the queue. Will return 0 for a in xive_vm_scan_interrupts()
176 * non-queueing priority (ie, qpage = 0). in xive_vm_scan_interrupts()
186 * We also need to do that if prio is 0 and we had no in xive_vm_scan_interrupts()
194 if (hirq == XICS_IPI || (prio == 0 && !qpage)) { in xive_vm_scan_interrupts()
196 xive_vm_source_eoi(xc->vp_ipi, in xive_vm_scan_interrupts()
197 &xc->vp_ipi_data); in xive_vm_scan_interrupts()
220 int p = atomic_xchg(&q->pending_count, 0); in xive_vm_scan_interrupts()
234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
235 prio = xc->mfrr; in xive_vm_scan_interrupts()
252 xc->pending = pending; in xive_vm_scan_interrupts()
266 * Note: This can only make xc->cppr smaller as the previous in xive_vm_scan_interrupts()
267 * loop will only exit with hirq != 0 if prio is lower than in xive_vm_scan_interrupts()
268 * the current xc->cppr. Thus we don't need to re-check xc->mfrr in xive_vm_scan_interrupts()
272 xc->cppr = prio; in xive_vm_scan_interrupts()
275 * as the HW interrupt we use for IPIs is routed to priority 0. in xive_vm_scan_interrupts()
279 if (xc->cppr != xc->hw_cppr) { in xive_vm_scan_interrupts()
280 xc->hw_cppr = xc->cppr; in xive_vm_scan_interrupts()
281 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_scan_interrupts()
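
Taken together, the xive_vm_scan_interrupts() lines implement the XICS arbitration between queued interrupts and a pending IPI: lower priority numbers are more favored, 0xff means "none", nothing at or below the current CPPR is delivered, and the MFRR may steal the slot when it is more favored than both. A small userspace model of just that decision (helper names are mine; the queue fetch, the IPI EOI and the final CPPR write-back are left out):

#include <stdint.h>
#include <stdio.h>

/* Lowest set bit of an 8-bit pending mask, or 0xff if nothing is pending
 * (matching the "if pending is 0 this will return 0xff" note above). */
static uint8_t best_pending_prio(uint8_t pending)
{
	for (uint8_t p = 0; p < 8; p++)
		if (pending & (1u << p))
			return p;
	return 0xff;
}

/* Model of the priority decision only: serve the most favored queued
 * priority, let a more favored pending IPI (mfrr) steal the slot, and never
 * deliver anything at or below the current CPPR. Returns 0xff for "nothing". */
static uint8_t model_pick_prio(uint8_t pending, uint8_t cppr, uint8_t mfrr)
{
	uint8_t prio = best_pending_prio(pending);

	if (prio >= cppr || prio > 7) {		/* nothing more favored is queued */
		if (mfrr < cppr)		/* ...but the IPI is deliverable */
			return mfrr;
		return 0xff;
	}
	if (prio >= mfrr && mfrr < cppr)	/* the IPI beats the queued priority */
		return mfrr;
	return prio;
}

int main(void)
{
	printf("%u %u %u\n",
	       (unsigned)model_pick_prio(1u << 5, 7, 2),	/* -> 2, IPI wins */
	       (unsigned)model_pick_prio(1u << 5, 7, 0xff),	/* -> 5 */
	       (unsigned)model_pick_prio(0x00, 4, 6));		/* -> 255, nothing */
	return 0;
}
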
289 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_xirr() local
295 xc->stat_vm_h_xirr++; in xive_vm_h_xirr()
298 xive_vm_ack_pending(xc); in xive_vm_h_xirr()
300 pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", in xive_vm_h_xirr()
301 xc->pending, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
304 old_cppr = xive_prio_to_guest(xc->cppr); in xive_vm_h_xirr()
307 hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch); in xive_vm_h_xirr()
309 pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", in xive_vm_h_xirr()
310 hirq, xc->hw_cppr, xc->cppr); in xive_vm_h_xirr()
313 if (hirq & 0xff000000) in xive_vm_h_xirr()
314 pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); in xive_vm_h_xirr()
324 * hirq = 0; in xive_vm_h_xirr()
338 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll() local
339 u8 pending = xc->pending; in xive_vm_h_ipoll()
344 xc->stat_vm_h_ipoll++; in xive_vm_h_ipoll()
347 if (xc->server_num != server) { in xive_vm_h_ipoll()
351 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipoll()
354 pending = 0xff; in xive_vm_h_ipoll()
358 u8 pipr = be64_to_cpu(qw1) & 0xff; in xive_vm_h_ipoll()
364 hirq = xive_vm_scan_interrupts(xc, pending, scan_poll); in xive_vm_h_ipoll()
367 vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); in xive_vm_h_ipoll()
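
The h_xirr/h_ipoll lines show how the value handed back in GPR4 is laid out: the guest CPPR in the top byte and the interrupt number in the low 24 bits, which is also the format xive_vm_h_eoi() parses further down (irq = xirr & 0x00ffffff). A throwaway pack/unpack sketch of that layout:

#include <stdint.h>
#include <stdio.h>

/* XIRR layout implied by the matched lines: CPPR in bits 31..24, interrupt
 * number in bits 23..0. */
static uint32_t xirr_pack(uint8_t cppr, uint32_t hirq)
{
	return ((uint32_t)cppr << 24) | (hirq & 0x00ffffff);
}

static void xirr_unpack(uint32_t xirr, uint8_t *cppr, uint32_t *irq)
{
	*cppr = xirr >> 24;
	*irq = xirr & 0x00ffffff;
}

int main(void)
{
	uint8_t cppr;
	uint32_t irq;

	xirr_unpack(xirr_pack(5, 0x1234), &cppr, &irq);
	printf("cppr=%u irq=0x%x\n", (unsigned)cppr, (unsigned)irq);
	return 0;	/* cppr=5 irq=0x1234 */
}
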
372 static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc) in xive_vm_push_pending_to_hw() argument
376 pending = xc->pending; in xive_vm_push_pending_to_hw()
377 if (xc->mfrr != 0xff) { in xive_vm_push_pending_to_hw()
378 if (xc->mfrr < 8) in xive_vm_push_pending_to_hw()
379 pending |= 1 << xc->mfrr; in xive_vm_push_pending_to_hw()
381 pending |= 0x80; in xive_vm_push_pending_to_hw()
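
xive_vm_push_pending_to_hw() folds a pending IPI into the per-priority bitmap before notifying the hardware: an MFRR of 0xff means no IPI, values 0..7 set the matching bit, and anything else is treated as the least favored slot (bit 7). A compile-and-run model of that folding (function name and test values are mine):

#include <stdint.h>
#include <stdio.h>

/* Model of folding a pending IPI (MFRR) into the per-priority pending bitmap. */
static uint8_t model_fold_mfrr(uint8_t pending, uint8_t mfrr)
{
	if (mfrr != 0xff) {		/* 0xff means no IPI requested */
		if (mfrr < 8)
			pending |= 1u << mfrr;
		else
			pending |= 0x80;	/* out-of-range MFRR: least favored slot */
	}
	return pending;
}

int main(void)
{
	printf("0x%02x 0x%02x 0x%02x\n",
	       (unsigned)model_fold_mfrr(0x00, 3),	/* -> 0x08 */
	       (unsigned)model_fold_mfrr(0x10, 0xff),	/* -> 0x10, no IPI pending */
	       (unsigned)model_fold_mfrr(0x00, 9));	/* -> 0x80 */
	return 0;
}
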
391 struct kvmppc_xive_vcpu *xc) in xive_vm_scan_for_rerouted_irqs() argument
396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { in xive_vm_scan_for_rerouted_irqs()
397 struct xive_q *q = &xc->queues[prio]; in xive_vm_scan_for_rerouted_irqs()
418 irq = entry & 0x7fffffff; in xive_vm_scan_for_rerouted_irqs()
429 if (xc->server_num == state->act_server) in xive_vm_scan_for_rerouted_irqs()
436 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); in xive_vm_scan_for_rerouted_irqs()
450 if (idx == 0) in xive_vm_scan_for_rerouted_irqs()
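
The xive_vm_scan_for_rerouted_irqs() lines show the queue-entry format this code relies on: bit 31 of each big-endian entry is a generation/toggle bit that must be preserved, and bits 30..0 carry the interrupt number, which is replaced by the XICS_DUMMY marker when the interrupt no longer targets this vCPU. A userspace sketch of that rewrite; htonl/ntohl stand in for cpu_to_be32/be32_to_cpu, and the literal 1 is only a stand-in for XICS_DUMMY:

#include <arpa/inet.h>	/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */
#include <stdint.h>
#include <stdio.h>

/* Queue-entry format implied by the matched lines: bit 31 is a toggle bit
 * that must survive, bits 30..0 carry the interrupt number. */
static uint32_t mark_entry_dummy(uint32_t be_entry, uint32_t dummy_irq)
{
	uint32_t entry = ntohl(be_entry);

	return htonl((entry & 0x80000000) | dummy_irq);
}

int main(void)
{
	uint32_t e = htonl(0x80001234u);	/* toggle bit set, irq 0x1234 */
	uint32_t d = mark_entry_dummy(e, 1);	/* 1 is only a stand-in for XICS_DUMMY */

	printf("irq 0x%x -> 0x%x, toggle kept: %d\n",
	       (unsigned)(ntohl(e) & 0x7fffffff),
	       (unsigned)(ntohl(d) & 0x7fffffff),
	       !!(ntohl(d) & 0x80000000));
	return 0;
}
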
458 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_cppr() local
464 xc->stat_vm_h_cppr++; in xive_vm_h_cppr()
470 old_cppr = xc->cppr; in xive_vm_h_cppr()
471 xc->cppr = cppr; in xive_vm_h_cppr()
474 * Order the above update of xc->cppr with the subsequent in xive_vm_h_cppr()
475 * read of xc->mfrr inside push_pending_to_hw() in xive_vm_h_cppr()
486 xive_vm_push_pending_to_hw(xc); in xive_vm_h_cppr()
505 xive_vm_scan_for_rerouted_irqs(xive, xc); in xive_vm_h_cppr()
509 xc->hw_cppr = cppr; in xive_vm_h_cppr()
520 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_eoi() local
523 u32 irq = xirr & 0x00ffffff, hw_num; in xive_vm_h_eoi()
525 int rc = 0; in xive_vm_h_eoi()
529 xc->stat_vm_h_eoi++; in xive_vm_h_eoi()
531 xc->cppr = xive_prio_from_guest(new_cppr); in xive_vm_h_eoi()
539 if (irq == XICS_IPI || irq == 0) { in xive_vm_h_eoi()
541 * This barrier orders the setting of xc->cppr vs. in xive_vm_h_eoi()
542 * subsequent test of xc->mfrr done inside in xive_vm_h_eoi()
566 * of xc->cppr vs. subsequent test of xc->mfrr done inside in xive_vm_h_eoi()
592 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_h_eoi()
609 xive_vm_scan_interrupts(xc, xc->pending, scan_eoi); in xive_vm_h_eoi()
610 xive_vm_push_pending_to_hw(xc); in xive_vm_h_eoi()
611 pr_devel(" after scan pending=%02x\n", xc->pending); in xive_vm_h_eoi()
614 xc->hw_cppr = xc->cppr; in xive_vm_h_eoi()
615 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); in xive_vm_h_eoi()
623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi() local
627 xc->stat_vm_h_ipi++; in xive_vm_h_ipi()
633 xc = vcpu->arch.xive_vcpu; in xive_vm_h_ipi()
636 xc->mfrr = mfrr; in xive_vm_h_ipi()
639 * The load of xc->cppr below and the subsequent MMIO store in xive_vm_h_ipi()
644 * updating xc->cppr then reading xc->mfrr. in xive_vm_h_ipi()
646 * - The target of the IPI sees the xc->mfrr update in xive_vm_h_ipi()
651 if (mfrr < xc->cppr) in xive_vm_h_ipi()
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); in xive_vm_h_ipi()
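
xive_vm_h_ipi() stores the new MFRR, orders that store against the following read of the target's CPPR (the barrier itself is in the surrounding code, not among the matched lines), and only pokes the target's IPI trigger page when the MFRR is more favored than the CPPR. A userspace analogue using C11 atomics to stand in for the plain store, full barrier and MMIO write (struct and field names are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative target state; the kernel uses plain fields plus an explicit
 * full barrier rather than C11 atomics. */
struct ipi_target {
	_Atomic uint8_t mfrr;	/* requested IPI priority, 0xff = none */
	_Atomic uint8_t cppr;	/* target's current processor priority */
	int kicked;		/* stands in for the store to the IPI trigger page */
};

static void model_h_ipi(struct ipi_target *t, uint8_t mfrr)
{
	/* Lockless write over MFRR... */
	atomic_store_explicit(&t->mfrr, mfrr, memory_order_relaxed);
	/* ...made globally visible before CPPR is sampled, so that either the
	 * sender sees a lowered CPPR and kicks, or the target rescans and sees
	 * the new MFRR itself. */
	atomic_thread_fence(memory_order_seq_cst);
	/* Shoot the IPI only if it is more favored than the target's CPPR. */
	if (mfrr < atomic_load_explicit(&t->cppr, memory_order_relaxed))
		t->kicked = 1;
}

int main(void)
{
	struct ipi_target t = { .mfrr = 0xff, .cppr = 5, .kicked = 0 };

	model_h_ipi(&t, 3);
	printf("kicked=%d mfrr=%u\n", t.kicked, (unsigned)atomic_load(&t.mfrr));
	return 0;	/* kicked=1 mfrr=3 */
}
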
665 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_vcpu_has_save_restore() local
668 return xc->vp_cam & TM_QW1W2_HO; in kvmppc_xive_vcpu_has_save_restore()
673 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_check_save_restore() local
674 struct kvmppc_xive *xive = xc->xive; in kvmppc_xive_check_save_restore()
713 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu()
746 /* Now P is 0, we can clear the flag */ in kvmppc_xive_push_vcpu()
747 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu()
772 /* Second load to recover the context state (Words 0 and 1) */ in kvmppc_xive_pull_vcpu()
777 vcpu->arch.xive_saved_state.lsmfb = 0; in kvmppc_xive_pull_vcpu()
778 vcpu->arch.xive_saved_state.ack = 0xff; in kvmppc_xive_pull_vcpu()
779 vcpu->arch.xive_pushed = 0; in kvmppc_xive_pull_vcpu()
835 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
869 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_attach_escalation() local
870 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_attach_escalation()
875 if (xc->esc_virq[prio]) in kvmppc_xive_attach_escalation()
876 return 0; in kvmppc_xive_attach_escalation()
879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in kvmppc_xive_attach_escalation()
880 if (!xc->esc_virq[prio]) { in kvmppc_xive_attach_escalation()
882 prio, xc->server_num); in kvmppc_xive_attach_escalation()
888 vcpu->kvm->arch.lpid, xc->server_num); in kvmppc_xive_attach_escalation()
891 vcpu->kvm->arch.lpid, xc->server_num, prio); in kvmppc_xive_attach_escalation()
894 prio, xc->server_num); in kvmppc_xive_attach_escalation()
899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in kvmppc_xive_attach_escalation()
901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in kvmppc_xive_attach_escalation()
905 prio, xc->server_num); in kvmppc_xive_attach_escalation()
908 xc->esc_virq_names[prio] = name; in kvmppc_xive_attach_escalation()
919 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
928 return 0; in kvmppc_xive_attach_escalation()
930 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
931 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
938 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue() local
939 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
940 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
945 return 0; in xive_provision_queue()
951 prio, xc->server_num); in xive_provision_queue()
954 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
958 * queue is fully configured. This is a requirement for prio 0 in xive_provision_queue()
961 * corresponding queue 0 entries in xive_provision_queue()
963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
967 prio, xc->server_num); in xive_provision_queue()
983 return 0; in xive_check_provisioning()
992 if (rc == 0 && !kvmppc_xive_has_single_escalation(xive)) in xive_check_provisioning()
1002 return 0; in xive_check_provisioning()
1008 struct kvmppc_xive_vcpu *xc; in xive_inc_q_pending() local
1017 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
1018 if (WARN_ON(!xc)) in xive_inc_q_pending()
1021 q = &xc->queues[prio]; in xive_inc_q_pending()
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue() local
1031 if (WARN_ON(!xc)) in xive_try_pick_queue()
1033 if (!xc->valid) in xive_try_pick_queue()
1036 q = &xc->queues[prio]; in xive_try_pick_queue()
1042 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
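
xive_try_pick_queue() reserves a slot in a queue's accounting with atomic_add_unless(&q->count, 1, max), failing with -EBUSY once the queue is full. A userspace model of that bounded reservation built on a compare-and-swap loop (the helper name and the plain -1 return are mine):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace model of atomic_add_unless(&q->count, 1, max): take one slot in
 * the queue's reservation counter unless it already holds `max` entries. */
static int try_reserve_slot(_Atomic int *count, int max)
{
	int cur = atomic_load(count);

	while (cur != max) {
		/* On failure the CAS reloads `cur`, so we simply retry. */
		if (atomic_compare_exchange_weak(count, &cur, cur + 1))
			return 0;		/* slot reserved */
	}
	return -1;				/* the kernel returns -EBUSY here */
}

int main(void)
{
	_Atomic int count = 0;

	printf("%d %d %d\n",
	       try_reserve_slot(&count, 2),
	       try_reserve_slot(&count, 2),
	       try_reserve_slot(&count, 2));	/* -> 0 0 -1, third caller finds it full */
	return 0;
}
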
1058 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); in kvmppc_xive_select_target()
1062 if (rc == 0) in kvmppc_xive_select_target()
1072 if (rc == 0) { in kvmppc_xive_select_target()
1074 pr_devel(" found on 0x%x/%d\n", *server, prio); in kvmppc_xive_select_target()
1275 int rc = 0; in kvmppc_xive_set_xive()
1281 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", in kvmppc_xive_set_xive()
1337 * we have a valid new priority (new_act_prio is not 0xff) in kvmppc_xive_set_xive()
1389 return 0; in kvmppc_xive_get_xive()
1407 pr_devel("int_on(irq=0x%x)\n", irq); in kvmppc_xive_int_on()
1417 /* If saved_priority is 0xff, do nothing */ in kvmppc_xive_int_on()
1419 return 0; in kvmppc_xive_int_on()
1428 return 0; in kvmppc_xive_int_on()
1446 pr_devel("int_off(irq=0x%x)\n", irq); in kvmppc_xive_int_off()
1454 return 0; in kvmppc_xive_int_off()
1481 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp() local
1483 if (!xc) in kvmppc_xive_get_icp()
1484 return 0; in kvmppc_xive_get_icp()
1487 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
1488 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
1489 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; in kvmppc_xive_get_icp()
1494 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp() local
1499 if (!xc || !xive) in kvmppc_xive_set_icp()
1508 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", in kvmppc_xive_set_icp()
1509 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
1521 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
1524 * Update MFRR state. If it's not 0xff, we mark the VCPU as in kvmppc_xive_set_icp()
1529 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
1531 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
1543 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
1548 return 0; in kvmppc_xive_set_icp()
1567 pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n", in kvmppc_xive_set_mapped()
1620 * mask the interrupt in a lossy way (act_priority is 0xff) in kvmppc_xive_set_mapped()
1645 return 0; in kvmppc_xive_set_mapped()
1662 pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq); in kvmppc_xive_clr_mapped()
1694 state->pt_number = 0; in kvmppc_xive_clr_mapped()
1725 return 0; in kvmppc_xive_clr_mapped()
1731 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts() local
1736 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1741 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { in kvmppc_xive_disable_vcpu_interrupts()
1748 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1755 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1758 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1776 vcpu->arch.xive_esc_vaddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1777 vcpu->arch.xive_esc_raddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1806 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu() local
1813 if (!xc) in kvmppc_xive_cleanup_vcpu()
1816 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1819 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1823 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1826 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1827 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1828 if (kvmppc_xive_has_single_escalation(xc->xive)) in kvmppc_xive_cleanup_vcpu()
1829 xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1830 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1831 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1832 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1837 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1840 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_cleanup_vcpu()
1843 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1844 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1846 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1855 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1856 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1857 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1860 kfree(xc); in kvmppc_xive_cleanup_vcpu()
1900 return 0; in kvmppc_xive_compute_vp_id()
1907 struct kvmppc_xive_vcpu *xc; in kvmppc_xive_connect_vcpu() local
1929 xc = kzalloc(sizeof(*xc), GFP_KERNEL); in kvmppc_xive_connect_vcpu()
1930 if (!xc) { in kvmppc_xive_connect_vcpu()
1935 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1936 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1937 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1938 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1939 xc->vp_id = vp_id; in kvmppc_xive_connect_vcpu()
1940 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1941 xc->valid = true; in kvmppc_xive_connect_vcpu()
1943 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1954 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1955 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1958 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1959 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1964 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1966 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1974 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_connect_vcpu()
1982 * and we enable escalation for queue 0 only which we'll use for in kvmppc_xive_connect_vcpu()
1987 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_connect_vcpu()
1988 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
1997 if (r == 0 && !kvmppc_xive_has_single_escalation(xive)) in kvmppc_xive_connect_vcpu()
2003 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
2004 q, i, NULL, 0, true); in kvmppc_xive_connect_vcpu()
2013 /* If not done above, attach priority 0 escalation */ in kvmppc_xive_connect_vcpu()
2014 r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_connect_vcpu()
2019 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
2021 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
2031 return 0; in kvmppc_xive_connect_vcpu()
2051 pr_err("invalid irq 0x%x in cpu queue!\n", irq); in xive_pre_save_set_queued()
2061 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); in xive_pre_save_set_queued()
2134 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2138 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
2144 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan() local
2145 if (!xc) in xive_pre_save_scan()
2147 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { in xive_pre_save_scan()
2148 if (xc->queues[j].qpage) in xive_pre_save_scan()
2149 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
2154 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2158 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
2168 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
2172 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_post_save_scan()
2177 xive->saved_src_count = 0; in xive_post_save_scan()
2218 if (xive->saved_src_count == 0) in xive_get_source()
2263 return 0; in xive_get_source()
2287 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_create_src_block()
2289 sb->irq_state[i].eisn = 0; in kvmppc_xive_create_src_block()
2312 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq() local
2314 if (!xc) in xive_check_delayed_irq()
2317 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
2318 xc->delayed_irq = 0; in xive_check_delayed_irq()
2335 int rc = 0; in xive_set_source()
2340 pr_devel("set_source(irq=0x%lx)\n", irq); in xive_set_source()
2363  pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n", in xive_set_source()
2372 if (state->ipi_number == 0) { in xive_set_source()
2377 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
2385 * 0 before calling it to ensure it actually performs the masking. in xive_set_source()
2387 state->guest_priority = 0; in xive_set_source()
2416 if (rc == 0) in xive_set_source()
2486 return 0; in xive_set_source()
2515 else if (level == 0 || level == KVM_INTERRUPT_UNSET) { in kvmppc_xive_set_irq()
2517 return 0; in kvmppc_xive_set_irq()
2523 return 0; in kvmppc_xive_set_irq()
2530 int rc = 0; in kvmppc_xive_set_nr_servers()
2599 return 0; in xive_has_attr()
2604 return 0; in xive_has_attr()
2613 xive_native_configure_irq(hw_num, 0, MASKED, 0); in kvmppc_xive_cleanup_irq()
2620 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_free_sources()
2687 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_release()
2727 memset(xive, 0, sizeof(*xive)); in kvmppc_xive_get_device()
2759 xive->q_page_order = 0; in kvmppc_xive_create()
2777 return 0; in kvmppc_xive_create()
2812 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_debug_show_queues() local
2815 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_debug_show_queues()
2816 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_debug_show_queues()
2819 if (!q->qpage && !xc->esc_virq[i]) in kvmppc_xive_debug_show_queues()
2831 if (xc->esc_virq[i]) { in kvmppc_xive_debug_show_queues()
2832 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); in kvmppc_xive_debug_show_queues()
2838 xc->esc_virq[i], in kvmppc_xive_debug_show_queues()
2845 return 0; in kvmppc_xive_debug_show_queues()
2854 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_debug_show_sources()
2890 u64 t_rm_h_xirr = 0; in xive_debug_show()
2891 u64 t_rm_h_ipoll = 0; in xive_debug_show()
2892 u64 t_rm_h_cppr = 0; in xive_debug_show()
2893 u64 t_rm_h_eoi = 0; in xive_debug_show()
2894 u64 t_rm_h_ipi = 0; in xive_debug_show()
2895 u64 t_vm_h_xirr = 0; in xive_debug_show()
2896 u64 t_vm_h_ipoll = 0; in xive_debug_show()
2897 u64 t_vm_h_cppr = 0; in xive_debug_show()
2898 u64 t_vm_h_eoi = 0; in xive_debug_show()
2899 u64 t_vm_h_ipi = 0; in xive_debug_show()
2903 return 0; in xive_debug_show()
2908 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show() local
2910 if (!xc) in xive_debug_show()
2915 xc->server_num, xc->vp_id, xc->vp_chip_id, in xive_debug_show()
2916 xc->cppr, xc->hw_cppr, in xive_debug_show()
2917 xc->mfrr, xc->pending, in xive_debug_show()
2918 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
2922 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
2923 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
2924 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
2925 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
2926 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
2927 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
2928 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
2929 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
2930 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
2931 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()
2943 for (i = 0; i <= xive->max_sbid; i++) { in xive_debug_show()
2953 return 0; in xive_debug_show()