xref: /openbmc/linux/arch/x86/xen/irq.c (revision ecb93d1c)
#include <linux/hardirq.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
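	/*
	 * Any hypercall will do as the trigger; the xen_version query is
	 * used because it is cheap and has no side effects.  Xen checks
	 * for pending events on the return path of every hypercall.
	 */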
	(void)HYPERVISOR_xen_version(0, NULL);
}

static void __init __xen_init_IRQ(void)
{
	int i;

	/* Create identity vector->irq map */
	for (i = 0; i < NR_VECTORS; i++) {
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(vector_irq, cpu)[i] = i;
	}

	xen_init_IRQ();
}

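/*
 * The save_fl/restore_fl/irq_disable/irq_enable implementations below
 * are wrapped by PV_CALLEE_SAVE_REGS_THUNK(), which generates a thunk
 * that saves and restores the caller-clobbered registers around the
 * call, so patched paravirt call sites only have to preserve the
 * callee-saved registers.
 */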
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = percpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag: IF set -> unmasked (0),
	   IF clear -> masked (1) */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

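/*
 * Like a native "hlt": with interrupts disabled the vcpu can never be
 * woken again, so take it offline with VCPUOP_down; with interrupts
 * enabled, just block until the next event arrives.
 */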
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = __xen_init_IRQ,

	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

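/* Install the Xen implementations of the paravirt irq operations. */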
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
}